From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 01/43] sched: Clean up and standardize #if/#else/#endif markers in sched/autogroup.[ch]
Date: Wed, 28 May 2025 10:08:42 +0200
Message-ID: <20250528080924.2273858-2-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */
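  A minimal sketch of the convention applied to a complete block
  (CONFIG_FOO and foo_setup() are hypothetical names, not code from
  this patch): the marker with a trailing colon opens the block that
  follows it, while the marker without a colon closes the construct:

	#ifdef CONFIG_FOO
	int foo_setup(struct foo *f);
	#else /* !CONFIG_FOO: */
	/* Stub, so that callers do not need an #ifdef of their own: */
	static inline int foo_setup(struct foo *f) { return 0; }
	#endif /* !CONFIG_FOO */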
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/autogroup.c | 6 +++---
 kernel/sched/autogroup.h | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index 2b331822c7e7..ad2b218e4657 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -25,9 +25,9 @@ static void __init sched_autogroup_sysctl_init(void)
 {
 	register_sysctl_init("kernel", sched_autogroup_sysctls);
 }
-#else
+#else /* !CONFIG_SYSCTL: */
 #define sched_autogroup_sysctl_init() do { } while (0)
-#endif
+#endif /* !CONFIG_SYSCTL */
 
 void __init autogroup_init(struct task_struct *init_task)
 {
@@ -108,7 +108,7 @@ static inline struct autogroup *autogroup_create(void)
 	free_rt_sched_group(tg);
 	tg->rt_se = root_task_group.rt_se;
 	tg->rt_rq = root_task_group.rt_rq;
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 	tg->autogroup = ag;
 
 	sched_online_group(tg, &root_task_group);
diff --git a/kernel/sched/autogroup.h b/kernel/sched/autogroup.h
index 90d69f2c5eaf..0ad3f71e1bfa 100644
--- a/kernel/sched/autogroup.h
+++ b/kernel/sched/autogroup.h
@@ -41,7 +41,7 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg)
 
 extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
 
-#else /* !CONFIG_SCHED_AUTOGROUP */
+#else /* !CONFIG_SCHED_AUTOGROUP: */
 
 static inline void autogroup_init(struct task_struct *init_task) { }
 static inline void autogroup_free(struct task_group *tg) { }
@@ -61,6 +61,6 @@ static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 	return 0;
 }
 
-#endif /* CONFIG_SCHED_AUTOGROUP */
+#endif /* !CONFIG_SCHED_AUTOGROUP */
 
 #endif /* _KERNEL_SCHED_AUTOGROUP_H */
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 02/43] sched: Clean up and standardize #if/#else/#endif markers in sched/clock.c
Date: Wed, 28 May 2025 10:08:43 +0200
Message-ID: <20250528080924.2273858-3-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/clock.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a09655b48140..3406145925b1 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -471,7 +471,7 @@ notrace void sched_clock_idle_wakeup_event(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+#else /* !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK: */
 
 void __init sched_clock_init(void)
 {
@@ -489,7 +489,7 @@ notrace u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }
 
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+#endif /* !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * Running clock - returns the time that has elapsed while a guest has been
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 03/43] sched: Clean up and standardize #if/#else/#endif markers in sched/core.c
Date: Wed, 28 May 2025 10:08:44 +0200
Message-ID: <20250528080924.2273858-4-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Apply this simplification:

	-#if defined(CONFIG_FOO)
	+#ifdef CONFIG_FOO

- Fix whitespace noise.

- Use vertical alignment to better visualize nested #ifdef blocks,
  where appropriate (both rules are combined in the sketch below):

	#ifdef CONFIG_FOO
	# ifdef CONFIG_BAR
	...
	# endif
	#endif
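  Illustrating the #if defined() simplification and the vertical
  alignment rule together on a hypothetical nested block (CONFIG_FOO,
  CONFIG_BAR and foo_bar_setup() are made-up names, not taken from
  this patch):

	-#if defined(CONFIG_FOO)
	-#ifdef CONFIG_BAR
	+#ifdef CONFIG_FOO
	+# ifdef CONFIG_BAR
	 static void foo_bar_setup(void) { }
	-#endif
	-#endif
	+# endif /* CONFIG_BAR */
	+#endif /* CONFIG_FOO */

  Note that only single-symbol tests can be converted this way: a
  multi-symbol condition such as '#if defined(CONFIG_FOO) &&
  defined(CONFIG_BAR)' has no #ifdef equivalent and is left alone.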
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/core.c | 186 ++++++++++++++++++++++++++--------------------------
 1 file changed, 93 insertions(+), 93 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 62b3416f5e43..21cf5809b2d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -481,13 +481,13 @@ void sched_core_put(void)
 		schedule_work(&_work);
 }
 
-#else /* !CONFIG_SCHED_CORE */
+#else /* !CONFIG_SCHED_CORE: */
 
 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
 static inline void
 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }
 
-#endif /* CONFIG_SCHED_CORE */
+#endif /* !CONFIG_SCHED_CORE */
 
 /* need a wrapper since we may need to trace from modules */
 EXPORT_TRACEPOINT_SYMBOL(sched_set_state_tp);
@@ -667,7 +667,7 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
 
 	double_rq_clock_clear_update(rq1, rq2);
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
  * __task_rq_lock - lock the rq @p resides on.
@@ -899,7 +899,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 	smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 }
 
-#else
+#else /* !CONFIG_SMP: */
 /*
  * Called to set the hrtick timer state.
  *
@@ -916,7 +916,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 		      HRTIMER_MODE_REL_PINNED_HARD);
 }
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static void hrtick_rq_init(struct rq *rq)
 {
@@ -925,7 +925,7 @@ static void hrtick_rq_init(struct rq *rq)
 #endif
 	hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
 }
-#else /* CONFIG_SCHED_HRTICK */
+#else /* !CONFIG_SCHED_HRTICK: */
 static inline void hrtick_clear(struct rq *rq)
 {
 }
@@ -933,7 +933,7 @@ static inline void hrtick_clear(struct rq *rq)
 static inline void hrtick_rq_init(struct rq *rq)
 {
 }
-#endif /* CONFIG_SCHED_HRTICK */
+#endif /* !CONFIG_SCHED_HRTICK */
 
 /*
  * try_cmpxchg based fetch_or() macro so it works for different integer types:
@@ -1971,7 +1971,7 @@ static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
 		sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
 	return result;
 }
-#endif
+#endif /* CONFIG_SYSCTL */
 
 static void uclamp_fork(struct task_struct *p)
 {
@@ -2037,13 +2037,13 @@ static void __init init_uclamp(void)
 	}
 }
 
-#else /* !CONFIG_UCLAMP_TASK */
+#else /* !CONFIG_UCLAMP_TASK: */
 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p, int flags) { }
 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { }
 static inline void uclamp_fork(struct task_struct *p) { }
 static inline void uclamp_post_fork(struct task_struct *p) { }
 static inline void init_uclamp(void) { }
-#endif /* CONFIG_UCLAMP_TASK */
+#endif /* !CONFIG_UCLAMP_TASK */
 
 bool sched_task_on_rq(struct task_struct *p)
 {
@@ -3657,7 +3657,7 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
 	}
 }
 
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 
 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
 
@@ -3766,7 +3766,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 
 		rq->idle_stamp = 0;
 	}
-#endif
+#endif /* CONFIG_SMP */
 }
 
 /*
@@ -3988,14 +3988,14 @@ static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 	return false;
 }
 
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 
 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
 	return false;
 }
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 {
@@ -4331,9 +4331,9 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 			psi_ttwu_dequeue(p);
 		set_task_cpu(p, cpu);
 	}
-#else
+#else /* !CONFIG_SMP: */
 	cpu = task_cpu(p);
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 	ttwu_queue(p, cpu, wake_flags);
 }
@@ -4595,8 +4595,8 @@ static int sysctl_numa_balancing(const struct ctl_table *table, int write,
 	}
 	return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_SCHEDSTATS
 
@@ -4783,7 +4783,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	p->on_cpu = 0;
 #endif
 	init_task_preempt_count(p);
@@ -4974,7 +4974,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 		__fire_sched_out_preempt_notifiers(curr, next);
 }
 
-#else /* !CONFIG_PREEMPT_NOTIFIERS */
+#else /* !CONFIG_PREEMPT_NOTIFIERS: */
 
 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
@@ -4986,7 +4986,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 {
 }
 
-#endif /* CONFIG_PREEMPT_NOTIFIERS */
+#endif /* !CONFIG_PREEMPT_NOTIFIERS */
 
 static inline void prepare_task(struct task_struct *next)
 {
@@ -5103,13 +5103,13 @@ void balance_callbacks(struct rq *rq, struct balance_callback *head)
 	}
 }
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline void __balance_callbacks(struct rq *rq)
 {
 }
 
-#endif
+#endif /* !CONFIG_SMP */
 
 static inline void
 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
@@ -5523,7 +5523,7 @@ void sched_exec(void)
 	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 }
 
-#endif
+#endif /* CONFIG_SMP */
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
 DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
@@ -5831,10 +5831,10 @@ int __init sched_tick_offload_init(void)
 	return 0;
 }
 
-#else /* !CONFIG_NO_HZ_FULL */
+#else /* !CONFIG_NO_HZ_FULL: */
 static inline void sched_tick_start(int cpu) { }
 static inline void sched_tick_stop(int cpu) { }
-#endif
+#endif /* !CONFIG_NO_HZ_FULL */
 
 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
@@ -6549,7 +6549,7 @@ static inline void sched_core_cpu_dying(unsigned int cpu)
 		rq->core = rq;
 }
 
-#else /* !CONFIG_SCHED_CORE */
+#else /* !CONFIG_SCHED_CORE: */
 
 static inline void sched_core_cpu_starting(unsigned int cpu) {}
 static inline void sched_core_cpu_deactivate(unsigned int cpu) {}
@@ -6561,7 +6561,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return __pick_next_task(rq, prev, rf);
 }
 
-#endif /* CONFIG_SCHED_CORE */
+#endif /* !CONFIG_SCHED_CORE */
 
 /*
  * Constants for the sched_mode argument of __schedule().
@@ -6988,14 +6988,14 @@ NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#ifndef preempt_schedule_dynamic_enabled
-#define preempt_schedule_dynamic_enabled preempt_schedule
-#define preempt_schedule_dynamic_disabled NULL
-#endif
+# ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
+# ifndef preempt_schedule_dynamic_enabled
+# define preempt_schedule_dynamic_enabled preempt_schedule
+# define preempt_schedule_dynamic_disabled NULL
+# endif
 DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
 void __sched notrace dynamic_preempt_schedule(void)
 {
@@ -7005,8 +7005,8 @@ void __sched notrace dynamic_preempt_schedule(void)
 }
 NOKPROBE_SYMBOL(dynamic_preempt_schedule);
 EXPORT_SYMBOL(dynamic_preempt_schedule);
-#endif
-#endif
+# endif
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 
 /**
  * preempt_schedule_notrace - preempt_schedule called by tracing
@@ -7061,14 +7061,14 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#ifndef preempt_schedule_notrace_dynamic_enabled
-#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
-#define preempt_schedule_notrace_dynamic_disabled NULL
-#endif
+# if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+# ifndef preempt_schedule_notrace_dynamic_enabled
+# define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
+# define preempt_schedule_notrace_dynamic_disabled NULL
+# endif
 DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
 void __sched notrace dynamic_preempt_schedule_notrace(void)
 {
@@ -7078,7 +7078,7 @@ void __sched notrace dynamic_preempt_schedule_notrace(void)
 }
 NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
 EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
-#endif
+# endif
 #endif
 
 #endif /* CONFIG_PREEMPTION */
@@ -7297,7 +7297,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
 
 	preempt_enable();
 }
-#endif
+#endif /* CONFIG_RT_MUTEXES */
 
 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
 int __sched __cond_resched(void)
 {
@@ -7328,17 +7328,17 @@ EXPORT_SYMBOL(__cond_resched);
 #endif
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define cond_resched_dynamic_enabled __cond_resched
-#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
+# ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL
+# define cond_resched_dynamic_enabled __cond_resched
+# define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(cond_resched);
 
-#define might_resched_dynamic_enabled __cond_resched
-#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
+# define might_resched_dynamic_enabled __cond_resched
+# define might_resched_dynamic_disabled ((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(might_resched);
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
 static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
 int __sched dynamic_cond_resched(void)
 {
@@ -7356,8 +7356,8 @@ int __sched dynamic_might_resched(void)
 	return __cond_resched();
 }
 EXPORT_SYMBOL(dynamic_might_resched);
-#endif
-#endif
+# endif
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 
 /*
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -7423,9 +7423,9 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
-#ifdef CONFIG_GENERIC_ENTRY
-#include
-#endif
+# ifdef CONFIG_GENERIC_ENTRY
+# include
+# endif
 
 /*
  * SC:cond_resched
@@ -7480,37 +7480,37 @@ int preempt_dynamic_mode = preempt_dynamic_undefined;
 
 int sched_dynamic_mode(const char *str)
 {
-#ifndef CONFIG_PREEMPT_RT
+# ifndef CONFIG_PREEMPT_RT
 	if (!strcmp(str, "none"))
 		return preempt_dynamic_none;
 
 	if (!strcmp(str, "voluntary"))
 		return preempt_dynamic_voluntary;
-#endif
+# endif
 
 	if (!strcmp(str, "full"))
 		return preempt_dynamic_full;
 
-#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
+# ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
 	if (!strcmp(str, "lazy"))
 		return preempt_dynamic_lazy;
-#endif
+# endif
 
 	return -EINVAL;
 }
 
-#define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
-#define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
+# define preempt_dynamic_key_enable(f) static_key_enable(&sk_dynamic_##f.key)
+# define preempt_dynamic_key_disable(f) static_key_disable(&sk_dynamic_##f.key)
 
-#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
-#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
-#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
-#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
-#define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
-#define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
-#else
-#error "Unsupported PREEMPT_DYNAMIC mechanism"
-#endif
+# if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+# define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
+# define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
+# elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+# define preempt_dynamic_enable(f) preempt_dynamic_key_enable(f)
+# define preempt_dynamic_disable(f) preempt_dynamic_key_disable(f)
+# else
+# error "Unsupported PREEMPT_DYNAMIC mechanism"
+# endif
 
 static DEFINE_MUTEX(sched_dynamic_mutex);
 
@@ -7614,7 +7614,7 @@ static void __init preempt_dynamic_init(void)
 	}
 }
 
-#define PREEMPT_MODEL_ACCESSOR(mode) \
+# define PREEMPT_MODEL_ACCESSOR(mode) \
 	bool preempt_model_##mode(void) \
 	{ \
 		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
@@ -8118,7 +8118,7 @@ static void balance_hotplug_wait(void)
 			   TASK_UNINTERRUPTIBLE);
 }
 
-#else
+#else /* !CONFIG_HOTPLUG_CPU: */
 
 static inline void balance_push(struct rq *rq)
 {
@@ -8132,7 +8132,7 @@ static inline void balance_hotplug_wait(void)
 {
 }
 
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* !CONFIG_HOTPLUG_CPU */
 
 void set_rq_online(struct rq *rq)
 {
@@ -8441,7 +8441,7 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_core_cpu_dying(cpu);
 	return 0;
 }
-#endif
+#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init sched_init_smp(void)
 {
@@ -8475,12 +8475,12 @@ static int __init migration_init(void)
 }
 early_initcall(migration_init);
 
-#else
+#else /* !CONFIG_SMP: */
 void __init sched_init_smp(void)
 {
 	sched_init_granularity();
 }
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 int in_sched_functions(unsigned long addr)
 {
@@ -8632,15 +8632,15 @@ void __init sched_init(void)
 		INIT_LIST_HEAD(&rq->cfs_tasks);
 
 		rq_attach_root(rq, &def_root_domain);
-#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_NO_HZ_COMMON
 		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 
 		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
-#endif
-#ifdef CONFIG_HOTPLUG_CPU
+# endif
+# ifdef CONFIG_HOTPLUG_CPU
 		rcuwait_init(&rq->hotplug_wait);
-#endif
+# endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
@@ -8825,7 +8825,7 @@ void __cant_sleep(const char *file, int line, int preempt_offset)
 }
 EXPORT_SYMBOL_GPL(__cant_sleep);
 
-#ifdef CONFIG_SMP
+# ifdef CONFIG_SMP
 void __cant_migrate(const char *file, int line)
 {
 	static unsigned long prev_jiffy;
@@ -8856,8 +8856,8 @@ void __cant_migrate(const char *file, int line)
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
 EXPORT_SYMBOL_GPL(__cant_migrate);
-#endif
-#endif
+# endif /* CONFIG_SMP */
+#endif /* CONFIG_DEBUG_ATOMIC_SLEEP */
 
 #ifdef CONFIG_MAGIC_SYSRQ
 void normalize_rt_tasks(void)
 {
@@ -8897,7 +8897,7 @@ void normalize_rt_tasks(void)
 
 #endif /* CONFIG_MAGIC_SYSRQ */
 
-#if defined(CONFIG_KGDB_KDB)
+#ifdef CONFIG_KGDB_KDB
 /*
  * These functions are only useful for KDB.
 *
@@ -8921,7 +8921,7 @@ struct task_struct *curr_task(int cpu)
 	return cpu_curr(cpu);
 }
 
-#endif /* defined(CONFIG_KGDB_KDB) */
+#endif /* CONFIG_KGDB_KDB */
 
 #ifdef CONFIG_CGROUP_SCHED
 /* task_group_lock serializes the addition/removal of task groups */
@@ -9802,7 +9802,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
 	scx_group_set_idle(css_tg(css), idle);
 	return ret;
 }
-#endif
+#endif /* CONFIG_GROUP_SCHED_WEIGHT */
 
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_GROUP_SCHED_WEIGHT
@@ -9930,7 +9930,7 @@ static int cpu_extra_stat_show(struct seq_file *sf,
 			   cfs_b->nr_periods, cfs_b->nr_throttled,
 			   throttled_usec, cfs_b->nr_burst, burst_usec);
 	}
-#endif
+#endif /* CONFIG_CFS_BANDWIDTH */
 	return 0;
 }
 
@@ -10071,7 +10071,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
 		ret = tg_set_cfs_bandwidth(tg, period, quota, burst);
 	return ret ?: nbytes;
 }
-#endif
+#endif /* CONFIG_CFS_BANDWIDTH */
 
 static struct cftype cpu_files[] = {
 #ifdef CONFIG_GROUP_SCHED_WEIGHT
@@ -10107,7 +10107,7 @@ static struct cftype cpu_files[] = {
 		.read_u64 = cpu_cfs_burst_read_u64,
 		.write_u64 = cpu_cfs_burst_write_u64,
 	},
-#endif
+#endif /* CONFIG_CFS_BANDWIDTH */
 #ifdef CONFIG_UCLAMP_TASK_GROUP
 	{
 		.name = "uclamp.min",
@@ -10121,7 +10121,7 @@ static struct cftype cpu_files[] = {
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
-#endif
+#endif /* CONFIG_UCLAMP_TASK_GROUP */
 	{ }	/* terminate */
 };
 
@@ -10142,7 +10142,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.threaded	= true,
 };
 
-#endif	/* CONFIG_CGROUP_SCHED */
+#endif /* CONFIG_CGROUP_SCHED */
 
 void dump_cpu_task(int cpu)
 {
@@ -10728,7 +10728,7 @@ void sched_mm_cid_fork(struct task_struct *t)
 	WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
 	t->mm_cid_active = 1;
 }
-#endif
+#endif /* CONFIG_SCHED_MM_CID */
 
 #ifdef CONFIG_SCHED_CLASS_EXT
 void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
@@ -10763,4 +10763,4 @@ void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx)
 	if (ctx->running)
 		set_next_task(rq, ctx->p);
 }
-#endif	/* CONFIG_SCHED_CLASS_EXT */
+#endif /* CONFIG_SCHED_CLASS_EXT */
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 04/43] sched: Clean up and standardize #if/#else/#endif markers in sched/cpufreq_schedutil.c
Date: Wed, 28 May 2025 10:08:45 +0200
Message-ID: <20250528080924.2273858-5-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/cpufreq_schedutil.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 816f07f9d30f..a28d0f94ed6b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -380,9 +380,9 @@ static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
 	sg_cpu->saved_idle_calls = idle_calls;
 	return ret;
 }
-#else
+#else /* !CONFIG_NO_HZ_COMMON: */
 static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
-#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 /*
  * Make sugov_should_update_freq() ignore the rate limit when DL
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 05/43] sched: Clean up and standardize #if/#else/#endif markers in sched/cpupri.h
Date: Wed, 28 May 2025 10:08:46 +0200
Message-ID: <20250528080924.2273858-6-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */
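  The annotation also pays off when a block closes far from where it
  opens, as with the bare #endif at the end of cpupri.h below: naming
  the controlling symbol on the #endif saves readers a search for the
  matching #ifdef. A minimal hypothetical sketch (struct foo and
  foo_cleanup() are made-up names, not from this patch):

	#ifdef CONFIG_SMP
	/* ... many lines of declarations ... */
	void foo_cleanup(struct foo *f);
	#endif /* CONFIG_SMP */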
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/cpupri.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
index d6cba0020064..f9c18e77d6a6 100644
--- a/kernel/sched/cpupri.h
+++ b/kernel/sched/cpupri.h
@@ -26,4 +26,4 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int cpupri_init(struct cpupri *cp);
 void cpupri_cleanup(struct cpupri *cp);
-#endif
+#endif /* CONFIG_SMP */
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 06/43] sched: Clean up and standardize #if/#else/#endif markers in sched/cputime.c
Date: Wed, 28 May 2025 10:08:47 +0200
Message-ID: <20250528080924.2273858-7-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>
- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/cputime.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 6dab4854c6c0..dd745789c9fc 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -88,7 +88,7 @@ static u64 irqtime_tick_accounted(u64 maxtime)
 	return delta;
 }
 
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
 
 static u64 irqtime_tick_accounted(u64 dummy)
 {
@@ -241,7 +241,7 @@ void __account_forceidle_time(struct task_struct *p, u64 delta)
 
 	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
 }
-#endif
+#endif /* CONFIG_SCHED_CORE */
 
 /*
  * When a guest is interrupted for a longer amount of time, missed clock
@@ -262,7 +262,7 @@ static __always_inline u64 steal_account_process_time(u64 maxtime)
 
 		return steal;
 	}
-#endif
+#endif /* CONFIG_PARAVIRT */
 	return 0;
 }
 
@@ -288,7 +288,7 @@ static inline u64 read_sum_exec_runtime(struct task_struct *t)
 {
 	return t->se.sum_exec_runtime;
 }
-#else
+#else /* !CONFIG_64BIT: */
 static u64 read_sum_exec_runtime(struct task_struct *t)
 {
 	u64 ns;
@@ -301,7 +301,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
 
 	return ns;
 }
-#endif
+#endif /* !CONFIG_64BIT */
 
 /*
  * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
@@ -411,11 +411,11 @@ static void irqtime_account_idle_ticks(int ticks)
 {
 	irqtime_account_process_tick(current, 0, ticks);
 }
-#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
 static inline void irqtime_account_idle_ticks(int ticks) { }
 static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 						int nr_ticks) { }
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 
 /*
  * Use precise platform statistics if available:
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 07/43] sched: Clean up and standardize #if/#else/#endif markers in sched/deadline.c
Date: Wed, 28 May 2025 10:08:48 +0200
Message-ID: <20250528080924.2273858-8-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/deadline.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ad45a8fea245..7df38ea4d650 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -51,7 +51,7 @@ static int __init sched_dl_sysctl_init(void)
 	return 0;
 }
 late_initcall(sched_dl_sysctl_init);
-#endif
+#endif /* CONFIG_SYSCTL */
 
 static bool dl_server(struct sched_dl_entity *dl_se)
 {
@@ -99,7 +99,7 @@ static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
 {
 	return pi_of(dl_se) != dl_se;
 }
-#else
+#else /* !CONFIG_RT_MUTEXES: */
 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
 {
 	return dl_se;
@@ -109,7 +109,7 @@ static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
 {
 	return false;
 }
-#endif
+#endif /* !CONFIG_RT_MUTEXES */
 
 #ifdef CONFIG_SMP
 static inline struct dl_bw *dl_bw_of(int i)
@@ -191,7 +191,7 @@ void __dl_update(struct dl_bw *dl_b, s64 bw)
 		rq->dl.extra_bw += bw;
 	}
 }
-#else
+#else /* !CONFIG_SMP: */
 static inline struct dl_bw *dl_bw_of(int i)
 {
 	return &cpu_rq(i)->dl.dl_bw;
@@ -219,7 +219,7 @@ void __dl_update(struct dl_bw *dl_b, s64 bw)
 
 	dl->extra_bw += bw;
 }
-#endif
+#endif /* !CONFIG_SMP */
 
 static inline
 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
@@ -753,7 +753,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 	return later_rq;
 }
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline
 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
@@ -782,7 +782,7 @@ static inline void deadline_queue_push_tasks(struct rq *rq)
 static inline void deadline_queue_pull_task(struct rq *rq)
 {
 }
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static void
 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
@@ -1209,7 +1209,7 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
 		push_dl_task(rq);
 	rq_repin_lock(rq, rf);
 }
-#endif
+#endif /* CONFIG_SMP */
 }
 
 /* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */
@@ -1356,7 +1356,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		 * there.
 		 */
 	}
-#endif
+#endif /* CONFIG_SMP */
 
 	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 	if (dl_task(rq->donor))
@@ -1598,7 +1598,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 		rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 }
 
 /*
@@ -1881,12 +1881,12 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 	}
 }
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static inline
 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
@@ -2375,11 +2375,11 @@ static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
 {
 	hrtick_start(rq, dl_se->runtime);
 }
-#else /* !CONFIG_SCHED_HRTICK */
+#else /* !CONFIG_SCHED_HRTICK: */
 static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
 {
 }
-#endif
+#endif /* !CONFIG_SCHED_HRTICK */
 
 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
 {
@@ -3121,13 +3121,13 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 	    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
 		resched_curr(rq);
 }
-#else
+#else /* !CONFIG_SMP: */
 	/*
 	 * We don't know if p has a earlier or later deadline, so let's blindly
 	 * set a (maybe not needed) rescheduling point.
 	 */
 	resched_curr(rq);
-#endif
+#endif /* !CONFIG_SMP */
 }
 
 #ifdef CONFIG_SCHED_CORE
@@ -3158,7 +3158,7 @@ DEFINE_SCHED_CLASS(dl) = {
 	.rq_offline		= rq_offline_dl,
 	.task_woken		= task_woken_dl,
 	.find_lock_rq		= find_lock_later_rq,
-#endif
+#endif /* CONFIG_SMP */
 
 	.task_tick		= task_tick_dl,
 	.task_fork		= task_fork_dl,
@@ -3570,7 +3570,7 @@ void dl_bw_free(int cpu, u64 dl_bw)
 {
 	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
 }
-#endif
+#endif /* CONFIG_SMP */
 
 void print_dl_stats(struct seq_file *m, int cpu)
 {
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 08/43] sched: Clean up and standardize #if/#else/#endif markers in sched/debug.c
Date: Wed, 28 May 2025 10:08:49 +0200
Message-ID: <20250528080924.2273858-9-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#ifdef CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/debug.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 557246880a7e..aaba8661aa46 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -90,10 +90,10 @@ static void sched_feat_enable(int i)
 {
 	static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
-#else
+#else /* !CONFIG_JUMP_LABEL: */
 static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
-#endif /* CONFIG_JUMP_LABEL */
+#endif /* !CONFIG_JUMP_LABEL */
 
 static int sched_feat_set(char *cmp)
 {
@@ -214,7 +214,7 @@ static const struct file_operations sched_scaling_fops = {
 	.release	= single_release,
 };
 
-#endif /* SMP */
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
@@ -311,9 +311,9 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
 
 	return result;
 }
-#else
-#define sched_verbose_write debugfs_write_file_bool
-#endif
+#else /* !CONFIG_SMP: */
+# define sched_verbose_write debugfs_write_file_bool
+#endif /* !CONFIG_SMP */
 
 static const struct file_operations sched_verbose_fops = {
 	.read =		debugfs_read_file_bool,
@@ -520,7 +520,7 @@ static __init int sched_init_debug(void)
 	sched_domains_mutex_lock();
 	update_sched_domain_debugfs();
 	sched_domains_mutex_unlock();
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_NUMA_BALANCING
 	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -530,7 +530,7 @@ static __init int sched_init_debug(void)
 	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
 	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
 	debugfs_create_u32("hot_threshold_ms", 0644,
numa, &sysctl_numa_balancing_hot_threshold);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
 
 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
 
@@ -694,14 +694,14 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
 	P(se->avg.runnable_avg);
-#endif
+#endif /* CONFIG_SMP */
 
 #undef PN_SCHEDSTAT
 #undef PN
 #undef P_SCHEDSTAT
 #undef P
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
 static DEFINE_SPINLOCK(sched_debug_lock);
@@ -874,8 +874,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->tg_load_avg_contrib);
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
-#endif
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* CONFIG_SMP */
 #ifdef CONFIG_CFS_BANDWIDTH
 	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 			cfs_rq->throttled);
@@ -951,9 +951,9 @@ static void print_cpu(struct seq_file *m, int cpu)
 		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
 			   cpu, freq / 1000, (freq % 1000));
 	}
-#else
+#else /* !CONFIG_X86: */
 	SEQ_printf(m, "cpu#%d\n", cpu);
-#endif
+#endif /* !CONFIG_X86 */
 
 #define P(x) \
 do { \
@@ -981,7 +981,7 @@ do { \
 	P64(avg_idle);
 	P64(max_idle_balance_cost);
 #undef P64
-#endif
+#endif /* CONFIG_SMP */
 
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 	if (schedstat_enabled()) {
@@ -1163,7 +1163,7 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
 		   task_node(p), task_numa_group_id(p));
 	show_numa_stats(p, m);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
 }
 
 void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
@@ -1256,13 +1256,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif
+#endif /* CONFIG_SMP */
 #ifdef CONFIG_UCLAMP_TASK
 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
 	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
 	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
 	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
-#endif
+#endif /* CONFIG_UCLAMP_TASK */
 	P(policy);
 	P(prio);
 	if (task_has_dl_policy(p)) {
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
header.d=kernel.org header.i=@kernel.org header.b=SybNsy+7; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="SybNsy+7" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2EB2AC4CEE7; Wed, 28 May 2025 08:10:16 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419820; bh=2GwVlWP3kyZi+qRaDCBa96mEqEKFz+u49EgTaZyIqCs=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=SybNsy+73QApF5u/lQ4sDtJ73EptyWVnjCBMVa1YKNkVZEK7MBmMDpz0GV+MmqUt0 0bDGPHUJptThSiW3dxVibDdbWzXKqYkK6Mo2+DSQEhmOzT16ZmBOY9jyeP4b+Wc8Be e1YteUxYBswlGLtWuNVh88/ArpZUw2vwH2Q3rA4yyW0fCEQtgxiDHqHobxdH/JHvCX k+VYod60vZjoiYDeUgZ1cojA+t3H9Lw2Ek04BR/Jhni+ZnFK4D0gTBn1AHJyZdkdtZ tvU0uKC8HDfw19+9Oe05vb8if85kgYVquLsXGJFZCY2Lf6JxED7ydaUAWe2NBsmvZf RtZrSm+9OmaqA== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 09/43] sched: Clean up and standardize #if/#else/#endif markers in sched/fair.c Date: Wed, 28 May 2025 10:08:50 +0200 Message-ID: <20250528080924.2273858-10-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" - Use the standard #ifdef marker format for larger blocks, where appropriate: #if CONFIG_FOO ... #else /* !CONFIG_FOO: */ ... #endif /* !CONFIG_FOO */ - Fix whitespace noise and other inconsistencies. 
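Applied to the empty !SMP stubs in this file, the pattern reads as in this
condensed sketch of the fair.c hunks below (not the verbatim file; bodies
are elided):

	#ifdef CONFIG_SMP
	void init_entity_runnable_average(struct sched_entity *se)
	{
		/* ... real PELT initialization ... */
	}
	#else /* !CONFIG_SMP: */
	void init_entity_runnable_average(struct sched_entity *se)
	{
	}
	#endif /* !CONFIG_SMP */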
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/fair.c | 111 ++++++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 55 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 125912c0e9dd..92ae3da8ca37 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -111,7 +111,7 @@ int __weak arch_asym_cpu_priority(int cpu)
  * (default: ~5%)
  */
 #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
@@ -162,7 +162,7 @@ static int __init sched_fair_sysctl_init(void)
 	return 0;
 }
 late_initcall(sched_fair_sysctl_init);
-#endif
+#endif /* CONFIG_SYSCTL */
 
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
@@ -471,7 +471,7 @@ static int se_is_idle(struct sched_entity *se)
 	return cfs_rq_is_idle(group_cfs_rq(se));
 }
 
-#else	/* !CONFIG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 
 #define for_each_sched_entity(se) \
 		for (; se; se = NULL)
@@ -517,7 +517,7 @@ static int se_is_idle(struct sched_entity *se)
 	return task_has_idle_policy(task_of(se));
 }
 
-#endif	/* CONFIG_FAIR_GROUP_SCHED */
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 static __always_inline
 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
@@ -1008,7 +1008,7 @@ int sched_update_scaling(void)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
 
@@ -1041,6 +1041,7 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #include "pelt.h"
+
 #ifdef CONFIG_SMP
 
 static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
@@ -1131,7 +1132,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 		sa->runnable_avg = sa->util_avg;
 }
 
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
@@ -1141,7 +1142,7 @@ void post_init_entity_util_avg(struct task_struct *p)
 static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 {
 }
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 {
@@ -2114,12 +2115,12 @@ static inline int numa_idle_core(int idle_core, int cpu)
 
 	return idle_core;
 }
-#else
+#else /* !CONFIG_SCHED_SMT: */
 static inline int numa_idle_core(int idle_core, int cpu)
 {
 	return idle_core;
 }
-#endif
+#endif /* !CONFIG_SCHED_SMT */
 
 /*
  * Gather all necessary information to make NUMA balancing placement
@@ -3663,7 +3664,8 @@ static void update_scan_period(struct task_struct *p, int new_cpu)
 		p->numa_scan_period = task_scan_start(p);
 }
 
-#else
+#else /* !CONFIG_NUMA_BALANCING: */
+
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
 }
@@ -3680,7 +3682,7 @@ static inline void update_scan_period(struct task_struct *p, int new_cpu)
 {
 }
 
-#endif /* CONFIG_NUMA_BALANCING */
+#endif /* !CONFIG_NUMA_BALANCING */
 
 static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3775,12 +3777,12 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
 					  cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
 }
-#else
+#else /* !CONFIG_SMP: */
 static inline void
 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 static inline void
 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
-#endif
+#endif /* !CONFIG_SMP */
 
 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
 
@@ -3990,11 +3992,11 @@ static void update_cfs_group(struct sched_entity *se)
 		reweight_entity(cfs_rq_of(se), se, shares);
 }
 
-#else /* CONFIG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 static inline void update_cfs_group(struct sched_entity *se)
 {
 }
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
@@ -4471,7 +4473,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
 	return true;
 }
 
-#else /* CONFIG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
 
@@ -4484,7 +4486,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
 
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_NO_HZ_COMMON
 static inline void migrate_se_pelt_lag(struct sched_entity *se)
@@ -4565,9 +4567,9 @@ static inline void migrate_se_pelt_lag(struct sched_entity *se)
 
 	__update_load_avg_blocked_se(now, se);
 }
-#else
+#else /* !CONFIG_NO_HZ_COMMON: */
 static void migrate_se_pelt_lag(struct sched_entity *se) {}
-#endif
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 /**
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
@@ -5134,7 +5136,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 	rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
 }
 
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
@@ -5174,7 +5176,7 @@ util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) {}
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
 {
@@ -5675,7 +5677,7 @@ void cfs_bandwidth_usage_dec(void)
 {
 	static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
 }
-#else /* CONFIG_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL: */
 static bool cfs_bandwidth_used(void)
 {
 	return true;
@@ -5683,7 +5685,7 @@ static bool cfs_bandwidth_used(void)
 
 void cfs_bandwidth_usage_inc(void) {}
 void cfs_bandwidth_usage_dec(void) {}
-#endif /* CONFIG_JUMP_LABEL */
+#endif /* !CONFIG_JUMP_LABEL */
 
 /*
  * default period for cfs group bandwidth.
@@ -6137,12 +6139,12 @@ static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 	if (first)
 		smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
 }
-#else
+#else /* !CONFIG_SMP: */
 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 {
 	unthrottle_cfs_rq(cfs_rq);
 }
-#endif
+#endif /* !CONFIG_SMP */
 
 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
 {
@@ -6723,9 +6725,9 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
 	if (cfs_task_bw_constrained(p))
 		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
 }
-#endif
+#endif /* CONFIG_NO_HZ_FULL */
 
-#else /* CONFIG_CFS_BANDWIDTH */
+#else /* !CONFIG_CFS_BANDWIDTH: */
 
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
@@ -6767,7 +6769,7 @@ bool cfs_task_bw_constrained(struct task_struct *p)
 	return false;
 }
 #endif
-#endif /* CONFIG_CFS_BANDWIDTH */
+#endif /* !CONFIG_CFS_BANDWIDTH */
 
 #if !defined(CONFIG_CFS_BANDWIDTH) || !defined(CONFIG_NO_HZ_FULL)
 static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
@@ -6812,7 +6814,7 @@ static void hrtick_update(struct rq *rq)
 
 	hrtick_start_fair(rq, donor);
 }
-#else /* !CONFIG_SCHED_HRTICK */
+#else /* !CONFIG_SCHED_HRTICK: */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
@@ -6821,7 +6823,7 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p)
 static inline void hrtick_update(struct rq *rq)
 {
 }
-#endif
+#endif /* !CONFIG_SCHED_HRTICK */
 
 #ifdef CONFIG_SMP
 static inline bool cpu_overutilized(int cpu)
@@ -6865,9 +6867,9 @@ static inline void check_update_overutilized_status(struct rq *rq)
 	if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu))
 		set_rd_overutilized(rq->rd, 1);
 }
-#else
+#else /* !CONFIG_SMP: */
 static inline void check_update_overutilized_status(struct rq *rq) { }
-#endif
+#endif /* !CONFIG_SMP */
 
 /* Runqueue only has SCHED_IDLE tasks enqueued */
 static int sched_idle_rq(struct rq *rq)
@@ -7667,7 +7669,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 	return -1;
 }
 
-#else /* CONFIG_SCHED_SMT */
+#else /* !CONFIG_SCHED_SMT: */
 
 static inline void set_idle_cores(int cpu, int val)
 {
@@ -7688,7 +7690,7 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
 	return -1;
 }
 
-#endif /* CONFIG_SCHED_SMT */
+#endif /* !CONFIG_SCHED_SMT */
 
 /*
  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
@@ -8733,9 +8735,9 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 	return sched_balance_newidle(rq, rf) != 0;
 }
-#else
+#else /* !CONFIG_SMP: */
 static inline void set_task_max_allowed_capacity(struct task_struct *p) {}
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static void set_next_buddy(struct sched_entity *se)
 {
@@ -8929,7 +8931,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 	return p;
 
 simple:
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 	put_prev_set_next_task(rq, prev, p);
 	return p;
 
@@ -9347,13 +9349,13 @@ static long migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 	return src_weight - dst_weight;
 }
 
-#else
+#else /* !CONFIG_NUMA_BALANCING: */
 static inline long migrate_degrades_locality(struct task_struct *p,
 					     struct lb_env *env)
 {
 	return 0;
 }
-#endif
+#endif /* !CONFIG_NUMA_BALANCING */
 
 /*
  * Check whether the task is ineligible on the destination cpu
@@ -9762,12 +9764,12 @@ static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
 	if (!has_blocked)
 		rq->has_blocked_load = 0;
 }
-#else
+#else /* !CONFIG_NO_HZ_COMMON: */
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
 static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
-#endif
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 static bool __update_blocked_others(struct rq *rq, bool *done)
 {
@@ -9876,7 +9878,7 @@ static unsigned long task_h_load(struct task_struct *p)
 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
 			cfs_rq_load_avg(cfs_rq) + 1);
 }
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
@@ -9893,7 +9895,7 @@ static unsigned long task_h_load(struct task_struct *p)
 {
 	return p->se.avg.load_avg;
 }
-#endif
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 static void sched_balance_update_blocked_averages(int cpu)
 {
@@ -10606,7 +10608,7 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
 		return remote;
 	return all;
 }
-#else
+#else /* !CONFIG_NUMA_BALANCING: */
 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
 {
 	return all;
@@ -10616,7 +10618,7 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq)
 {
 	return regular;
 }
-#endif /* CONFIG_NUMA_BALANCING */
+#endif /* !CONFIG_NUMA_BALANCING */
 
 
 struct sg_lb_stats;
@@ -12762,7 +12764,7 @@ static void nohz_newidle_balance(struct rq *this_rq)
 	atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
 }
 
-#else /* !CONFIG_NO_HZ_COMMON */
+#else /* !CONFIG_NO_HZ_COMMON: */
 static inline void nohz_balancer_kick(struct rq *rq) { }
 
 static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
@@ -12771,7 +12773,7 @@ static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle
 }
 
 static inline void nohz_newidle_balance(struct rq *this_rq) { }
-#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 /*
 * sched_balance_newidle is called by schedule() if this_cpu is about to become
@@ -13066,10 +13068,10 @@ bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
 
 	cfs_rqa = sea->cfs_rq;
 	cfs_rqb = seb->cfs_rq;
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 	cfs_rqa = &task_rq(a)->cfs;
 	cfs_rqb = &task_rq(b)->cfs;
-#endif
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 	/*
	 * Find delta after normalizing se's vruntime with its cfs_rq's
@@ -13093,9 +13095,9 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu)
 #endif
 	return throttled_hierarchy(cfs_rq);
 }
-#else
+#else /* !CONFIG_SCHED_CORE: */
 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
-#endif
+#endif /* !CONFIG_SCHED_CORE */
 
 /*
 * scheduler tick hitting a task of our scheduling class.
@@ -13189,9 +13191,9 @@ static void propagate_entity_cfs_rq(struct sched_entity *se)
 			list_add_leaf_cfs_rq(cfs_rq);
 	}
 }
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
-#endif
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 static void detach_entity_cfs_rq(struct sched_entity *se)
 {
@@ -13727,6 +13729,5 @@ __init void init_sched_fair_class(void)
 		nohz.next_blocked = jiffies;
 		zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 #endif
-#endif /* SMP */
-
+#endif /* CONFIG_SMP */
 }
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 10/43] sched: Clean up and standardize #if/#else/#endif markers in sched/idle.c
Date: Wed, 28 May 2025 10:08:51 +0200
Message-ID: <20250528080924.2273858-11-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
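Note that a block with no #else branch keeps the positive form in its
label; a condensed sketch of the first idle.c hunk below (the function
body is elided here):

	#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
	static int __init cpu_idle_nopoll_setup(char *__unused)
	{
		/* ... */
	}
	__setup("hlt", cpu_idle_nopoll_setup);
	#endif /* CONFIG_GENERIC_IDLE_POLL_SETUP */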
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/idle.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2c85c86b455f..cb01b063d733 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -47,7 +47,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
 	return 1;
 }
 __setup("hlt", cpu_idle_nopoll_setup);
-#endif
+#endif /* CONFIG_GENERIC_IDLE_POLL_SETUP */
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
@@ -95,10 +95,10 @@ static inline void cond_tick_broadcast_exit(void)
 	if (static_branch_unlikely(&arch_needs_tick_broadcast))
 		tick_broadcast_exit();
 }
-#else
+#else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE: */
 static inline void cond_tick_broadcast_enter(void) { }
 static inline void cond_tick_broadcast_exit(void) { }
-#endif
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST_IDLE */
 
 /**
 * default_idle_call - Default CPU idle routine.
@@ -439,7 +439,7 @@ balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	return WARN_ON_ONCE(1);
 }
-#endif
+#endif /* CONFIG_SMP */
 
 /*
 * Idle tasks are unconditionally rescheduled:
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 11/43] sched: Clean up and standardize #if/#else/#endif markers in sched/loadavg.c
Date: Wed, 28 May 2025 10:08:52 +0200
Message-ID: <20250528080924.2273858-12-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/loadavg.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index c48900b856a2..318a271f9e91 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -333,12 +333,12 @@ static void calc_global_nohz(void)
 	smp_wmb();
 	calc_load_idx++;
 }
-#else /* !CONFIG_NO_HZ_COMMON */
+#else /* !CONFIG_NO_HZ_COMMON: */
 
 static inline long calc_load_nohz_read(void) { return 0; }
 static inline void calc_global_nohz(void) { }
 
-#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* !CONFIG_NO_HZ_COMMON */
 
 /*
 * calc_load - update the avenrun load estimates 10 ticks after the
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 12/43] sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]
Date: Wed, 28 May 2025 10:08:53 +0200
Message-ID: <20250528080924.2273858-13-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
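With nested conditionals, each #else/#endif now names its own condition,
so the reader can tell which level is being closed. A condensed sketch of
the pelt.h structure touched below (bodies elided):

	#ifdef CONFIG_SMP
	/* ... SMP-only PELT helpers ... */
	#ifdef CONFIG_CFS_BANDWIDTH
	/* ... */
	#else /* !CONFIG_CFS_BANDWIDTH: */
	/* ... */
	#endif /* !CONFIG_CFS_BANDWIDTH */
	#else /* !CONFIG_SMP: */
	/* ... !SMP stubs ... */
	#endif /* !CONFIG_SMP */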
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/pelt.c |  4 ++--
 kernel/sched/pelt.h | 13 ++++++-------
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 7a8534a2deff..7ec1ca5a6178 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -413,7 +413,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 /*
@@ -466,7 +466,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 
 	return ret;
 }
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
 
 /*
 * Load avg and utiliztion metrics need to be updated periodically and before
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index f4f6a0875c66..2635ec239bff 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -15,7 +15,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return READ_ONCE(rq->avg_hw.load_avg);
 }
-#else
+#else /* !CONFIG_SCHED_HW_PRESSURE: */
 static inline int
 update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
 {
@@ -26,7 +26,7 @@ static inline u64 hw_load_avg(struct rq *rq)
 {
 	return 0;
 }
-#endif
+#endif /* !CONFIG_SCHED_HW_PRESSURE */
 
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
@@ -174,15 +174,15 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 
 	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
-#else
+#else /* !CONFIG_CFS_BANDWIDTH: */
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	return rq_clock_pelt(rq_of(cfs_rq));
 }
-#endif
+#endif /* !CONFIG_CFS_BANDWIDTH */
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -231,6 +231,5 @@ static inline void
 update_idle_rq_clock_pelt(struct rq *rq) { }
 
 static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
-#endif
-
 
+#endif /* !CONFIG_SMP */
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 13/43] sched: Clean up and standardize #if/#else/#endif markers in sched/psi.c
Date: Wed, 28 May 2025 10:08:54 +0200
Message-ID: <20250528080924.2273858-14-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/psi.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 1396674fa722..c48aba6ab0a6 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1035,7 +1035,7 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
 		psi_schedule_rtpoll_work(group, 1, false);
 	} while ((group = group->parent));
 }
-#endif
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 /**
 * psi_memstall_enter - mark the beginning of a memory stall section
@@ -1651,7 +1651,7 @@ static const struct proc_ops psi_irq_proc_ops = {
 	.proc_poll	= psi_fop_poll,
 	.proc_release	= psi_fop_release,
 };
-#endif
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 static int __init psi_proc_init(void)
 {
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 14/43] sched: Clean up and standardize #if/#else/#endif markers in sched/rt.c
Date: Wed, 28 May 2025 10:08:55 +0200
Message-ID: <20250528080924.2273858-15-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
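For compound conditions the whole expression is negated, with parentheses,
as the rt.c hunks below do; a condensed sketch:

	#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	/* ... */
	#else /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED): */
	static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
	static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
	#endif /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED) */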
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/rt.c | 54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e40422c37033..85768c81531c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -60,7 +60,7 @@ static int __init sched_rt_sysctl_init(void)
 	return 0;
 }
 late_initcall(sched_rt_sysctl_init);
-#endif
+#endif /* CONFIG_SYSCTL */
 
 void init_rt_rq(struct rt_rq *rt_rq)
 {
@@ -291,7 +291,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	return 0;
 }
 
-#else /* CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_RT_GROUP_SCHED: */
 
 #define rt_entity_is_task(rt_se) (1)
 
@@ -327,7 +327,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	return 1;
 }
-#endif /* CONFIG_RT_GROUP_SCHED */
+#endif /* !CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
 
@@ -430,7 +430,7 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	}
 }
 
-#else
+#else /* !CONFIG_SMP: */
 
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
@@ -443,7 +443,7 @@ static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 static inline void rt_queue_push_tasks(struct rq *rq)
 {
 }
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
@@ -485,12 +485,12 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 
 	return cpu_cap >= min(min_cap, max_cap);
 }
-#else
+#else /* !CONFIG_UCLAMP_TASK: */
 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 {
 	return true;
 }
-#endif
+#endif /* !CONFIG_UCLAMP_TASK */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 
@@ -798,9 +798,9 @@ static void balance_runtime(struct rt_rq *rt_rq)
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 }
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 static inline void balance_runtime(struct rt_rq *rt_rq) {}
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
@@ -930,7 +930,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 	return 0;
 }
 
-#else /* !CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_RT_GROUP_SCHED: */
 
 typedef struct rt_rq *rt_rq_iter_t;
 
@@ -982,7 +982,7 @@ static void __enable_runtime(struct rq *rq) { }
 static void __disable_runtime(struct rq *rq) { }
 #endif
 
-#endif /* CONFIG_RT_GROUP_SCHED */
+#endif /* !CONFIG_RT_GROUP_SCHED */
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
@@ -1033,7 +1033,7 @@ static void update_curr_rt(struct rq *rq)
 			do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
 	}
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 }
 
 static void
@@ -1107,14 +1107,14 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
 
-#else /* CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 
 static inline
 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 static inline
 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
 
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 static void
@@ -1155,12 +1155,12 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio)
 		dec_rt_prio_smp(rt_rq, prio, prev_prio);
 }
 
-#else
+#else /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED): */
 
 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
 
-#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
+#endif /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED) */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 
@@ -1182,7 +1182,7 @@ dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 		WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
 }
 
-#else /* CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_RT_GROUP_SCHED: */
 
 static void
 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
@@ -1192,7 +1192,7 @@ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static inline void
 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
-#endif /* CONFIG_RT_GROUP_SCHED */
+#endif /* !CONFIG_RT_GROUP_SCHED */
 
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
@@ -1682,7 +1682,7 @@ static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
 		check_preempt_equal_prio(rq, p);
-#endif
+#endif /* CONFIG_SMP */
 }
 
 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
@@ -2509,11 +2509,11 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 		 */
 		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
-#else
+#else /* !CONFIG_SMP: */
 		/* For UP simply resched on drop of prio */
 		if (oldprio < p->prio)
 			resched_curr(rq);
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 	} else {
 		/*
		 * This task is not running, but if it is
@@ -2549,9 +2549,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 			}
 		}
 	}
 }
-#else
+#else /* !CONFIG_POSIX_TIMERS: */
 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
-#endif
+#endif /* !CONFIG_POSIX_TIMERS */
 
 /*
 * scheduler tick hitting a task of our scheduling class.
@@ -2620,7 +2620,7 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu)
 
 	return rt_rq_throttled(rt_rq);
 }
-#endif
+#endif /* CONFIG_SCHED_CORE */
 
 DEFINE_SCHED_CLASS(rt) = {
 
@@ -2643,7 +2643,7 @@ DEFINE_SCHED_CLASS(rt) = {
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 	.find_lock_rq		= find_lock_lowest_rq,
-#endif
+#endif /* !CONFIG_SMP */
 
 	.task_tick		= task_tick_rt,
 
@@ -2887,7 +2887,7 @@ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
 	return 1;
 }
 
-#else /* !CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_RT_GROUP_SCHED: */
 
 #ifdef CONFIG_SYSCTL
 static int sched_rt_global_constraints(void)
@@ -2895,7 +2895,7 @@ static int sched_rt_global_constraints(void)
 	return 0;
 }
 #endif /* CONFIG_SYSCTL */
-#endif /* CONFIG_RT_GROUP_SCHED */
+#endif /* !CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_SYSCTL
 static int sched_rt_global_validate(void)
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 15/43] sched: Clean up and standardize #if/#else/#endif markers in sched/sched.h
Date: Wed, 28 May 2025 10:08:56 +0200
Message-ID: <20250528080924.2273858-16-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
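Inner preprocessor levels keep their indented '# else'/'# endif' spelling
and get the same labels; a condensed sketch of the rt_group_sched block
changed below (declarations elided):

	#ifdef CONFIG_RT_GROUP_SCHED
	# ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
	/* ... static key defaults to false ... */
	# else /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED: */
	/* ... static key defaults to true ... */
	# endif /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */
	#else /* !CONFIG_RT_GROUP_SCHED: */
	# define rt_group_sched_enabled()	false
	#endif /* !CONFIG_RT_GROUP_SCHED */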
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 82 +++++++++++++++++++++++++++-------------------------
 1 file changed, 42 insertions(+), 40 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c5a6a503eb6d..aec0c8253c1c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -424,7 +424,7 @@ struct cfs_bandwidth {
 	int			nr_burst;
 	u64			throttled_time;
 	u64			burst_time;
-#endif
+#endif /* CONFIG_CFS_BANDWIDTH */
 };
 
 /* Task group related information */
@@ -442,15 +442,15 @@ struct task_group {
 	/* runqueue "owned" by this group on each CPU */
 	struct cfs_rq		**cfs_rq;
 	unsigned long		shares;
-#ifdef	CONFIG_SMP
+#ifdef CONFIG_SMP
 	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cache-line separated from the fields above which
	 * will also be accessed at each tick.
	 */
 	atomic_long_t		load_avg ____cacheline_aligned;
-#endif
-#endif
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	struct sched_rt_entity	**rt_se;
@@ -531,7 +531,7 @@ extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 extern void online_fair_sched_group(struct task_group *tg);
 extern void unregister_fair_sched_group(struct task_group *tg);
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 static inline void free_fair_sched_group(struct task_group *tg) { }
 static inline int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
@@ -539,7 +539,7 @@ static inline int alloc_fair_sched_group(struct task_group *tg, struct task_grou
 }
 static inline void online_fair_sched_group(struct task_group *tg) { }
 static inline void unregister_fair_sched_group(struct task_group *tg) { }
-#endif
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
@@ -576,22 +576,22 @@ extern int sched_group_set_idle(struct task_group *tg, long idle);
 #ifdef CONFIG_SMP
 extern void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next);
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 static inline void set_task_rq_fair(struct sched_entity *se,
			     struct cfs_rq *prev, struct cfs_rq *next) { }
-#endif /* CONFIG_SMP */
-#else /* !CONFIG_FAIR_GROUP_SCHED */
+#endif /* !CONFIG_SMP */
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) { return 0; }
 static inline int sched_group_set_idle(struct task_group *tg, long idle) { return 0; }
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
-#else /* CONFIG_CGROUP_SCHED */
+#else /* !CONFIG_CGROUP_SCHED: */
 
 struct cfs_bandwidth { };
 
 static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }
 
-#endif /* CONFIG_CGROUP_SCHED */
+#endif /* !CONFIG_CGROUP_SCHED */
 
 extern void unregister_rt_sched_group(struct task_group *tg);
 extern void free_rt_sched_group(struct task_group *tg);
@@ -859,9 +859,9 @@ struct dl_rq {
	 * of the leftmost (earliest deadline) element.
	 */
 	struct rb_root_cached	pushable_dl_tasks_root;
-#else
+#else /* !CONFIG_SMP: */
 	struct dl_bw		dl_bw;
-#endif
+#endif /* !CONFIG_SMP */
	/*
	 * "Active utilization" for this runqueue: increased when a
	 * task wakes up (becomes TASK_RUNNING) and decreased when a
@@ -1008,7 +1008,7 @@ struct root_domain {
	/* These atomics are updated outside of a lock */
 	atomic_t		rto_loop_next;
 	atomic_t		rto_loop_start;
-#endif
+#endif /* HAVE_RT_PUSH_IPI */
	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
@@ -1294,7 +1294,7 @@ struct rq {
 	unsigned int		core_forceidle_seq;
 	unsigned int		core_forceidle_occupation;
 	u64			core_forceidle_start;
-#endif
+#endif /* CONFIG_SCHED_CORE */
 
	/* Scratch cpumask to be temporarily used under rq_lock */
 	cpumask_var_t		scratch_mask;
@@ -1313,13 +1313,13 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 	return cfs_rq->rq;
 }
 
-#else
+#else /* !CONFIG_FAIR_GROUP_SCHED: */
 
 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 {
 	return container_of(cfs_rq, struct rq, cfs);
 }
-#endif
+#endif /* !CONFIG_FAIR_GROUP_SCHED */
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -1500,6 +1500,7 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 }
 
 #endif /* !CONFIG_SCHED_CORE */
+
 #ifdef CONFIG_RT_GROUP_SCHED
 # ifdef CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED
 DECLARE_STATIC_KEY_FALSE(rt_group_sched);
@@ -1507,16 +1508,16 @@ static inline bool rt_group_sched_enabled(void)
 {
 	return static_branch_unlikely(&rt_group_sched);
 }
-# else
+# else /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED: */
 DECLARE_STATIC_KEY_TRUE(rt_group_sched);
 static inline bool rt_group_sched_enabled(void)
 {
 	return static_branch_likely(&rt_group_sched);
 }
-# endif /* CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */
-#else
+# endif /* !CONFIG_RT_GROUP_SCHED_DEFAULT_DISABLED */
+#else /* !CONFIG_RT_GROUP_SCHED: */
 # define rt_group_sched_enabled()	false
-#endif /* CONFIG_RT_GROUP_SCHED */
+#endif /* !CONFIG_RT_GROUP_SCHED */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
 {
@@ -1574,9 +1575,9 @@ static inline void update_idle_core(struct rq *rq)
 		__update_idle_core(rq);
 }
 
-#else
+#else /* !CONFIG_SCHED_SMT: */
 static inline void update_idle_core(struct rq *rq) { }
-#endif
+#endif /* !CONFIG_SCHED_SMT */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
@@ -1757,7 +1758,7 @@ static inline void scx_rq_clock_invalidate(struct rq *rq)
 	WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
 }
 
-#else /* !CONFIG_SCHED_CLASS_EXT */
+#else /* !CONFIG_SCHED_CLASS_EXT: */
 #define scx_enabled()		false
 #define scx_switched_all()	false
 
@@ -2174,7 +2175,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 		tg = &root_task_group;
 	p->rt.rt_rq  = tg->rt_rq[cpu];
 	p->rt.parent = tg->rt_se[cpu];
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 }
 
 #else /* !CONFIG_CGROUP_SCHED: */
@@ -2200,7 +2201,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 	smp_wmb();
 	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
 	p->wake_cpu = cpu;
-#endif
+#endif /* CONFIG_SMP */
 }
 
 /*
@@ -2429,7 +2430,7 @@ struct sched_class {
 	void (*rq_offline)(struct rq *rq);
 
 	struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
-#endif
+#endif /* CONFIG_SMP */
 
 	void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork)(struct task_struct *p);
@@ -2954,7 +2955,7 @@ static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
	/*
	 * __sched_core_flip() relies on SMT having cpu-id lock order.
	 */
-#endif
+#endif /* CONFIG_SCHED_CORE */
 	return rq1->cpu < rq2->cpu;
 }
 
@@ -3145,6 +3146,7 @@ extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
 
 extern void resched_latency_warn(int cpu, u64 latency);
+
 #ifdef CONFIG_NUMA_BALANCING
 extern void show_numa_stats(struct task_struct *p, struct seq_file *m);
 extern void
@@ -3254,14 +3256,14 @@ static inline u64 irq_time_read(int cpu)
 	return total;
 }
 
-#else
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
 
 static inline int irqtime_enabled(void)
 {
 	return 0;
 }
 
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 
 #ifdef CONFIG_CPU_FREQ
 
@@ -3355,9 +3357,9 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 	return READ_ONCE(rq->avg_rt.util_avg);
 }
 
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP: */
 static inline bool update_other_load_avgs(struct rq *rq) { return false; }
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
 
 #ifdef CONFIG_UCLAMP_TASK
 
@@ -3537,13 +3539,13 @@ static inline bool sched_energy_enabled(void)
 
 extern struct cpufreq_governor schedutil_gov;
 
-#else /* ! (CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
+#else /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL): */
 
 #define perf_domain_span(pd) NULL
 
 static inline bool sched_energy_enabled(void) { return false; }
 
-#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+#endif /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
 
 #ifdef CONFIG_MEMBARRIER
 
@@ -3569,7 +3571,7 @@ static inline void membarrier_switch_mm(struct rq *rq,
 	WRITE_ONCE(rq->membarrier_state, membarrier_state);
 }
 
-#else /* !CONFIG_MEMBARRIER :*/
+#else /* !CONFIG_MEMBARRIER: */
 
 static inline void membarrier_switch_mm(struct rq *rq,
					struct mm_struct *prev_mm,
@@ -3590,7 +3592,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
 
 	return true;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 extern void swake_up_all_locked(struct swait_queue_head *q);
 extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
@@ -3910,7 +3912,7 @@ bool task_is_pushable(struct rq *rq, struct task_struct *p, int cpu)
 
 	return false;
 }
-#endif
+#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_RT_MUTEXES
 
@@ -3954,7 +3956,7 @@ extern void check_class_changed(struct rq *rq, struct task_struct *p,
 #ifdef CONFIG_SMP
 extern struct balance_callback *splice_balance_callbacks(struct rq *rq);
 extern void balance_callbacks(struct rq *rq, struct balance_callback *head);
-#else
+#else /* !CONFIG_SMP: */
 
 static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
 {
@@ -3965,7 +3967,7 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *hea
 {
 }
 
-#endif
+#endif /* !CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_CLASS_EXT
 /*
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 16/43] sched: Clean up and standardize #if/#else/#endif markers in sched/stats.[ch]
Date: Wed, 28 May 2025 10:08:57 +0200
Message-ID: <20250528080924.2273858-17-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks, where
  appropriate:

	#if CONFIG_FOO
	...
	#else /* !CONFIG_FOO: */
	...
	#endif /* !CONFIG_FOO */
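The stub side of such a block is often a set of no-op macros; a condensed
sketch (the #ifdef arm is elided here, and the stub bodies follow the
sched_info_* context lines visible in the stats.h hunk below):

	#ifdef CONFIG_SCHED_INFO
	/* ... real sched_info accounting ... */
	#else /* !CONFIG_SCHED_INFO: */
	# define sched_info_enqueue(rq, t)	do { } while (0)
	# define sched_info_dequeue(rq, t)	do { } while (0)
	# define sched_info_switch(rq, t, next)	do { } while (0)
	#endif /* !CONFIG_SCHED_INFO */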
    #endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/stats.c |  2 +-
 kernel/sched/stats.h | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 4346fd81c31f..def01886e93f 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -163,7 +163,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->ttwu_move_balance);
 		}
 		rcu_read_unlock();
-#endif
+#endif /* CONFIG_SMP */
 	}
 	return 0;
 }
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 452826df6ae1..26f3fd4d34ce 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -112,10 +112,10 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
 		     bool sleep);
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
-#else
+#else /* !CONFIG_IRQ_TIME_ACCOUNTING: */
 static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
 				       struct task_struct *prev) {}
-#endif /*CONFIG_IRQ_TIME_ACCOUNTING */
+#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 /*
  * PSI tracks state that persists across sleeps, such as iowaits and
  * memory stalls. As a result, it has to distinguish between sleeps,
@@ -220,7 +220,7 @@ static inline void psi_sched_switch(struct task_struct *prev,
 	psi_task_switch(prev, next, sleep);
 }
 
-#else /* CONFIG_PSI */
+#else /* !CONFIG_PSI: */
 static inline void psi_enqueue(struct task_struct *p, bool migrate) {}
 static inline void psi_dequeue(struct task_struct *p, bool migrate) {}
 static inline void psi_ttwu_dequeue(struct task_struct *p) {}
@@ -229,7 +229,7 @@ static inline void psi_sched_switch(struct task_struct *prev,
 				    bool sleep) {}
 static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
 				       struct task_struct *prev) {}
-#endif /* CONFIG_PSI */
+#endif /* !CONFIG_PSI */
 
 #ifdef CONFIG_SCHED_INFO
 /*
@@ -334,6 +334,6 @@ sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *n
 # define sched_info_enqueue(rq, t)	do { } while (0)
 # define sched_info_dequeue(rq, t)	do { } while (0)
 # define sched_info_switch(rq, t, next)	do { } while (0)
-#endif /* CONFIG_SCHED_INFO */
+#endif /* !CONFIG_SCHED_INFO */
 
 #endif /* _KERNEL_STATS_H */
-- 
2.45.2
From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 17/43] sched: Clean up and standardize #if/#else/#endif
 markers in sched/syscalls.c
Date: Wed, 28 May 2025 10:08:58 +0200
Message-ID: <20250528080924.2273858-18-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks,
  where appropriate:

    #if CONFIG_FOO
    ...
    #else /* !CONFIG_FOO: */
    ...
    #endif /* !CONFIG_FOO */

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/syscalls.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 547c1f05b667..5cb5e9487f0d 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -174,7 +174,7 @@ SYSCALL_DEFINE1(nice, int, increment)
 	return 0;
 }
 
-#endif
+#endif /* __ARCH_WANT_SYS_NICE */
 
 /**
  * task_prio - return the priority value of a given task.
@@ -255,8 +255,7 @@ int sched_core_idle_cpu(int cpu)
 
 	return idle_cpu(cpu);
 }
-
-#endif
+#endif /* CONFIG_SCHED_CORE */
 
 /**
  * find_process_by_pid - find a process with a matching PID value.
@@ -448,7 +447,7 @@ static inline int uclamp_validate(struct task_struct *p,
 }
 static void __setscheduler_uclamp(struct task_struct *p,
 				  const struct sched_attr *attr) { }
-#endif
+#endif /* !CONFIG_UCLAMP_TASK */
 
 /*
  * Allow unprivileged RT tasks to decrease priority.
@@ -658,7 +657,7 @@ int __sched_setscheduler(struct task_struct *p,
 				goto unlock;
 			}
 		}
-#endif
+#endif /* CONFIG_SMP */
 	}
 
 	/* Re-check policy now with rq lock held: */
-- 
2.45.2
From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 18/43] sched: Clean up and standardize #if/#else/#endif
 markers in sched/topology.c
Date: Wed, 28 May 2025 10:08:59 +0200
Message-ID: <20250528080924.2273858-19-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

- Use the standard #ifdef marker format for larger blocks,
  where appropriate (a sketch of the convention follows below):

    #if CONFIG_FOO
    ...
    #else /* !CONFIG_FOO: */
    ...
    #endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.
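
To make the convention concrete, here is a minimal illustrative sketch of
both the simple and the compound-guard form; CONFIG_FOO/CONFIG_BAR and the
foo_*() helpers are placeholders, not real Kconfig options or kernel
functions:

    #ifdef CONFIG_FOO

    void foo_init(void);

    #else /* !CONFIG_FOO: */

    static inline void foo_init(void) { }

    #endif /* !CONFIG_FOO */

    #if defined(CONFIG_FOO) && defined(CONFIG_BAR)

    void foo_bar_init(void);

    #else /* !(CONFIG_FOO && CONFIG_BAR): */

    static inline void foo_bar_init(void) { }

    #endif /* !(CONFIG_FOO && CONFIG_BAR) */

Both the #else and the #endif marker repeat the negated condition, so a
reader landing at the bottom of a long conditional block can tell which
branch they are in without scrolling back up.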
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/topology.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index a2a38e1b6f18..4f52b8e56c19 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -328,7 +328,7 @@ static int __init sched_energy_aware_sysctl_init(void)
 }
 
 late_initcall(sched_energy_aware_sysctl_init);
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
 
 static void free_pd(struct perf_domain *pd)
 {
@@ -464,9 +464,9 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
 
 	return false;
 }
-#else
+#else /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL): */
 static void free_pd(struct perf_domain *pd) { }
-#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
+#endif /* !(CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL) */
 
 static void free_rootdomain(struct rcu_head *rcu)
 {
@@ -1613,7 +1613,7 @@ static int sched_domains_curr_level;
 int sched_max_numa_distance;
 static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
-#endif
+#endif /* CONFIG_NUMA */
 
 /*
  * SD_flags allowed in topology descriptions.
@@ -1729,7 +1729,7 @@ sd_init(struct sched_domain_topology_level *tl,
 					   SD_WAKE_AFFINE);
 		}
 
-#endif
+#endif /* CONFIG_NUMA */
 	} else {
 		sd->cache_nice_tries = 1;
 	}
-- 
2.45.2
From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 19/43] sched/smp: Always define
 sched_domains_mutex_lock()/unlock(), def_root_domain and sched_domains_mutex
Date: Wed, 28 May 2025 10:09:00 +0200
Message-ID: <20250528080924.2273858-20-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and data
structures unconditional.

Unconditionally build kernel/sched/topology.c and the main sched-domains
locking primitives.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 include/linux/sched.h        | 5 -----
 kernel/sched/build_utility.c | 3 ++-
 kernel/sched/topology.c      | 4 ++++
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45e5953b8f32..77fb5aa73d70 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -395,15 +395,10 @@ enum uclamp_id {
 	UCLAMP_CNT
 };
 
-#ifdef CONFIG_SMP
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
 extern void sched_domains_mutex_lock(void);
 extern void sched_domains_mutex_unlock(void);
-#else
-static inline void sched_domains_mutex_lock(void) { }
-static inline void sched_domains_mutex_unlock(void) { }
-#endif
 
 struct sched_param {
 	int sched_priority;
diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
index bf9d8db94b70..5c485b2dfb95 100644
--- a/kernel/sched/build_utility.c
+++ b/kernel/sched/build_utility.c
@@ -83,9 +83,10 @@
 #ifdef CONFIG_SMP
 # include "cpupri.c"
 # include "stop_task.c"
-# include "topology.c"
 #endif
 
+#include "topology.c"
+
 #ifdef CONFIG_SCHED_CORE
 # include "core_sched.c"
 #endif
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4f52b8e56c19..7ce0bac86e94 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -15,6 +15,8 @@ void sched_domains_mutex_unlock(void)
 	mutex_unlock(&sched_domains_mutex);
 }
 
+#ifdef CONFIG_SMP
+
 /* Protected by sched_domains_mutex: */
 static cpumask_var_t sched_domains_tmpmask;
 static cpumask_var_t sched_domains_tmpmask2;
@@ -2855,3 +2857,5 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
 	sched_domains_mutex_unlock();
 }
+
+#endif /* CONFIG_SMP */
-- 
2.45.2
From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Subject: [PATCH 20/43] sched/smp: Make SMP unconditional
Date: Wed, 28 May 2025 10:09:01 +0200
Message-ID: <20250528080924.2273858-21-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and data
structures unconditional.

Introduce transitory wrappers for functionality not yet converted to SMP.

Note that this patch is pretty large, because there's no clear separation
between various aspects of the SMP scheduler: it's basically a huge block
of #ifdef CONFIG_SMP. A fair amount of it has to be switched on for it to
boot and work on UP systems.
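
To make the "transitory wrapper" idea concrete, here is a hedged sketch
distilled from the __ttwu_queue_wakelist() hunk further down in this patch
(simplified, not the literal kernel code): the wrapper and the data it
touches are now compiled on UP builds too, and only the cross-CPU IPI step
stays under #ifdef CONFIG_SMP:

    static void __ttwu_queue_wakelist(struct task_struct *p, int cpu,
                                      int wake_flags)
    {
            struct rq *rq = cpu_rq(cpu);

            p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);

            WRITE_ONCE(rq->ttwu_pending, 1);
    #ifdef CONFIG_SMP
            /* The IPI plumbing only exists on SMP builds: */
            __smp_call_single_queue(cpu, &p->wake_entry.llist);
    #endif
    }

On UP builds the guarded call compiles away while the surrounding wakeup
code stays identical on UP and SMP kernels.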
Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- include/linux/preempt.h | 9 --- include/linux/sched.h | 42 -------------- include/linux/sched/deadline.h | 4 -- include/linux/sched/idle.h | 4 -- include/linux/sched/nohz.h | 4 +- include/linux/sched/topology.h | 32 ----------- kernel/sched/build_policy.c | 6 +- kernel/sched/build_utility.c | 6 +- kernel/sched/core.c | 106 +++++------------------------------- kernel/sched/cpudeadline.h | 2 - kernel/sched/cpupri.h | 2 - kernel/sched/deadline.c | 95 -------------------------------- kernel/sched/debug.c | 12 ---- kernel/sched/fair.c | 115 -------------------------------------= -- kernel/sched/pelt.h | 53 ------------------ kernel/sched/rt.c | 6 +- kernel/sched/sched.h | 121 +------------------------------------= ---- kernel/sched/syscalls.c | 2 - kernel/sched/topology.c | 10 +--- 19 files changed, 31 insertions(+), 600 deletions(-) diff --git a/include/linux/preempt.h b/include/linux/preempt.h index b0af8d4ef6e6..1fad1c8a4c76 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -369,8 +369,6 @@ static inline void preempt_notifier_init(struct preempt= _notifier *notifier, =20 #endif =20 -#ifdef CONFIG_SMP - /* * Migrate-Disable and why it is undesired. * @@ -429,13 +427,6 @@ static inline void preempt_notifier_init(struct preemp= t_notifier *notifier, extern void migrate_disable(void); extern void migrate_enable(void); =20 -#else - -static inline void migrate_disable(void) { } -static inline void migrate_enable(void) { } - -#endif /* CONFIG_SMP */ - /** * preempt_disable_nested - Disable preemption inside a normally preempt d= isabled section * diff --git a/include/linux/sched.h b/include/linux/sched.h index 77fb5aa73d70..f60aaa7fc4ad 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -595,7 +595,6 @@ struct sched_entity { unsigned long runnable_weight; #endif =20 -#ifdef CONFIG_SMP /* * Per entity load average tracking. * @@ -603,7 +602,6 @@ struct sched_entity { * collide with read-mostly values above. 
*/ struct sched_avg avg; -#endif }; =20 struct sched_rt_entity { @@ -833,7 +831,6 @@ struct task_struct { struct alloc_tag *alloc_tag; #endif =20 -#ifdef CONFIG_SMP int on_cpu; struct __call_single_node wake_entry; unsigned int wakee_flips; @@ -849,7 +846,6 @@ struct task_struct { */ int recent_used_cpu; int wake_cpu; -#endif int on_rq; =20 int prio; @@ -908,9 +904,7 @@ struct task_struct { cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; -#ifdef CONFIG_SMP unsigned short migration_disabled; -#endif unsigned short migration_flags; =20 #ifdef CONFIG_PREEMPT_RCU @@ -942,10 +936,8 @@ struct task_struct { struct sched_info sched_info; =20 struct list_head tasks; -#ifdef CONFIG_SMP struct plist_node pushable_tasks; struct rb_node pushable_dl_tasks; -#endif =20 struct mm_struct *mm; struct mm_struct *active_mm; @@ -1834,7 +1826,6 @@ extern int cpuset_cpumask_can_shrink(const struct cpu= mask *cur, const struct cpu extern int task_can_attach(struct task_struct *p); extern int dl_bw_alloc(int cpu, u64 dl_bw); extern void dl_bw_free(int cpu, u64 dl_bw); -#ifdef CONFIG_SMP =20 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumas= k *new_mask); @@ -1852,33 +1843,6 @@ extern void release_user_cpus_ptr(struct task_struct= *p); extern int dl_task_check_affinity(struct task_struct *p, const struct cpum= ask *mask); extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); -#else -static inline void do_set_cpus_allowed(struct task_struct *p, const struct= cpumask *new_mask) -{ -} -static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct= cpumask *new_mask) -{ - /* Opencoded cpumask_test_cpu(0, new_mask) to avoid dependency on cpumask= .h */ - if ((*cpumask_bits(new_mask) & 1) =3D=3D 0) - return -EINVAL; - return 0; -} -static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_s= truct *src, int node) -{ - if (src->user_cpus_ptr) - return -EINVAL; - return 0; -} -static inline void release_user_cpus_ptr(struct task_struct *p) -{ - WARN_ON(p->user_cpus_ptr); -} - -static inline int dl_task_check_affinity(struct task_struct *p, const stru= ct cpumask *mask) -{ - return 0; -} -#endif =20 extern int yield_to(struct task_struct *p, bool preempt); extern void set_user_nice(struct task_struct *p, long nice); @@ -1967,11 +1931,7 @@ extern int wake_up_state(struct task_struct *tsk, un= signed int state); extern int wake_up_process(struct task_struct *tsk); extern void wake_up_new_task(struct task_struct *tsk); =20 -#ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); -#else -static inline void kick_process(struct task_struct *tsk) { } -#endif =20 extern void __set_task_comm(struct task_struct *tsk, const char *from, boo= l exec); #define set_task_comm(tsk, from) ({ \ @@ -2216,7 +2176,6 @@ extern long sched_getaffinity(pid_t pid, struct cpuma= sk *mask); #define TASK_SIZE_OF(tsk) TASK_SIZE #endif =20 -#ifdef CONFIG_SMP static inline bool owner_on_cpu(struct task_struct *owner) { /* @@ -2228,7 +2187,6 @@ static inline bool owner_on_cpu(struct task_struct *o= wner) =20 /* Returns effective CPU energy utilization, as seen by the scheduler */ unsigned long sched_cpu_util(int cpu); -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_SCHED_CORE extern void sched_core_free(struct task_struct *tsk); diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 
f9aabbc9d22e..c40115d4e34d 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -29,15 +29,11 @@ static inline bool dl_time_before(u64 a, u64 b) return (s64)(a - b) < 0; } =20 -#ifdef CONFIG_SMP - struct root_domain; extern void dl_add_task_root_domain(struct task_struct *p); extern void dl_clear_root_domain(struct root_domain *rd); extern void dl_clear_root_domain_cpu(int cpu); =20 -#endif /* CONFIG_SMP */ - extern u64 dl_cookie; extern bool dl_bw_visited(int cpu, u64 cookie); =20 diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h index 439f6029d3b9..8465ff1f20d1 100644 --- a/include/linux/sched/idle.h +++ b/include/linux/sched/idle.h @@ -11,11 +11,7 @@ enum cpu_idle_type { CPU_MAX_IDLE_TYPES }; =20 -#ifdef CONFIG_SMP extern void wake_up_if_idle(int cpu); -#else -static inline void wake_up_if_idle(int cpu) { } -#endif =20 /* * Idle thread specific functions to determine the need_resched diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h index 6d67e9a5af6b..0db7f67935fe 100644 --- a/include/linux/sched/nohz.h +++ b/include/linux/sched/nohz.h @@ -6,7 +6,7 @@ * This is the interface between the scheduler and nohz/dynticks: */ =20 -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +#ifdef CONFIG_NO_HZ_COMMON extern void nohz_balance_enter_idle(int cpu); extern int get_nohz_timer_target(void); #else @@ -23,7 +23,7 @@ static inline void calc_load_nohz_remote(struct rq *rq) {= } static inline void calc_load_nohz_stop(void) { } #endif /* CONFIG_NO_HZ_COMMON */ =20 -#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP) +#ifdef CONFIG_NO_HZ_COMMON extern void wake_up_nohz_cpu(int cpu); #else static inline void wake_up_nohz_cpu(int cpu) { } diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 198bb5cc1774..e54e7fa76ba6 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -9,7 +9,6 @@ /* * sched-domains (multiprocessor balancing) declarations: */ -#ifdef CONFIG_SMP =20 /* Generate SD flag indexes */ #define SD_FLAG(name, mflags) __##name, @@ -200,37 +199,6 @@ extern void sched_update_asym_prefer_cpu(int cpu, int = old_prio, int new_prio); =20 # define SD_INIT_NAME(type) .name =3D #type =20 -#else /* CONFIG_SMP */ - -struct sched_domain_attr; - -static inline void -partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], - struct sched_domain_attr *dattr_new) -{ -} - -static inline bool cpus_equal_capacity(int this_cpu, int that_cpu) -{ - return true; -} - -static inline bool cpus_share_cache(int this_cpu, int that_cpu) -{ - return true; -} - -static inline bool cpus_share_resources(int this_cpu, int that_cpu) -{ - return true; -} - -static inline void sched_update_asym_prefer_cpu(int cpu, int old_prio, int= new_prio) -{ -} - -#endif /* !CONFIG_SMP */ - #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern void rebuild_sched_domains_energy(void); #else diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c index 72d97aa8b726..c4a488e67aa7 100644 --- a/kernel/sched/build_policy.c +++ b/kernel/sched/build_policy.c @@ -50,11 +50,9 @@ #include "idle.c" =20 #include "rt.c" +#include "cpudeadline.c" =20 -#ifdef CONFIG_SMP -# include "cpudeadline.c" -# include "pelt.c" -#endif +#include "pelt.c" =20 #include "cputime.c" #include "deadline.c" diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c index 5c485b2dfb95..e2cf3b08d4e9 100644 --- a/kernel/sched/build_utility.c +++ b/kernel/sched/build_utility.c @@ 
-80,10 +80,8 @@ #include "wait_bit.c" #include "wait.c" =20 -#ifdef CONFIG_SMP -# include "cpupri.c" -# include "stop_task.c" -#endif +#include "cpupri.c" +#include "stop_task.c" =20 #include "topology.c" =20 diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 21cf5809b2d8..8e5f728df3e0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -650,7 +650,6 @@ void raw_spin_rq_unlock(struct rq *rq) raw_spin_unlock(rq_lockp(rq)); } =20 -#ifdef CONFIG_SMP /* * double_rq_lock - safely lock two runqueues */ @@ -667,7 +666,6 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2) =20 double_rq_clock_clear_update(rq1, rq2); } -#endif /* CONFIG_SMP */ =20 /* * __task_rq_lock - lock the rq @p resides on. @@ -949,7 +947,7 @@ static inline void hrtick_rq_init(struct rq *rq) _val; \ }) =20 -#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) +#ifdef TIF_POLLING_NRFLAG /* * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, * this avoids any races wrt polling state changes and thereby avoids @@ -988,13 +986,11 @@ static inline bool set_nr_and_not_polling(struct thre= ad_info *ti, int tif) return true; } =20 -#ifdef CONFIG_SMP static inline bool set_nr_if_polling(struct task_struct *p) { return false; } #endif -#endif =20 static bool __wake_q_add(struct wake_q_head *head, struct task_struct *tas= k) { @@ -1167,7 +1163,6 @@ void resched_cpu(int cpu) raw_spin_rq_unlock_irqrestore(rq, flags); } =20 -#ifdef CONFIG_SMP #ifdef CONFIG_NO_HZ_COMMON /* * In the semi idle case, use the nearest busy CPU for migrating timers @@ -1374,10 +1369,8 @@ bool sched_can_stop_tick(struct rq *rq) return true; } #endif /* CONFIG_NO_HZ_FULL */ -#endif /* CONFIG_SMP */ =20 -#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) &&= \ - (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_FAIR_GROUP_SCHED) /* * Iterate task_group tree rooted at *from, calling @down when first enter= ing a * node and @up when leaving it for the final time. 
@@ -2353,8 +2346,6 @@ unsigned long wait_task_inactive(struct task_struct *= p, unsigned int match_state return ncsw; } =20 -#ifdef CONFIG_SMP - static void __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); =20 @@ -3305,6 +3296,8 @@ void relax_compatible_cpus_allowed_ptr(struct task_st= ruct *p) WARN_ON_ONCE(ret); } =20 +#ifdef CONFIG_SMP + void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { unsigned int state =3D READ_ONCE(p->__state); @@ -3358,6 +3351,7 @@ void set_task_cpu(struct task_struct *p, unsigned int= new_cpu) =20 __set_task_cpu(p, new_cpu); } +#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_NUMA_BALANCING static void __migrate_swap_task(struct task_struct *p, int cpu) @@ -3657,17 +3651,6 @@ void sched_set_stop_task(int cpu, struct task_struct= *stop) } } =20 -#else /* !CONFIG_SMP: */ - -static inline void migrate_disable_switch(struct rq *rq, struct task_struc= t *p) { } - -static inline bool rq_has_pinned_tasks(struct rq *rq) -{ - return false; -} - -#endif /* !CONFIG_SMP */ - static void ttwu_stat(struct task_struct *p, int cpu, int wake_flags) { @@ -3678,7 +3661,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_fl= ags) =20 rq =3D this_rq(); =20 -#ifdef CONFIG_SMP if (cpu =3D=3D rq->cpu) { __schedstat_inc(rq->ttwu_local); __schedstat_inc(p->stats.nr_wakeups_local); @@ -3698,7 +3680,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_fl= ags) =20 if (wake_flags & WF_MIGRATED) __schedstat_inc(p->stats.nr_wakeups_migrate); -#endif /* CONFIG_SMP */ =20 __schedstat_inc(rq->ttwu_count); __schedstat_inc(p->stats.nr_wakeups); @@ -3727,13 +3708,11 @@ ttwu_do_activate(struct rq *rq, struct task_struct = *p, int wake_flags, if (p->sched_contributes_to_load) rq->nr_uninterruptible--; =20 -#ifdef CONFIG_SMP if (wake_flags & WF_RQ_SELECTED) en_flags |=3D ENQUEUE_RQ_SELECTED; if (wake_flags & WF_MIGRATED) en_flags |=3D ENQUEUE_MIGRATED; else -#endif if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); @@ -3744,7 +3723,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p= , int wake_flags, =20 ttwu_do_wakeup(p); =20 -#ifdef CONFIG_SMP if (p->sched_class->task_woken) { /* * Our task @p is fully woken up and running; so it's safe to @@ -3766,7 +3744,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p= , int wake_flags, =20 rq->idle_stamp =3D 0; } -#endif /* CONFIG_SMP */ } =20 /* @@ -3820,7 +3797,6 @@ static int ttwu_runnable(struct task_struct *p, int w= ake_flags) return ret; } =20 -#ifdef CONFIG_SMP void sched_ttwu_pending(void *arg) { struct llist_node *llist =3D arg; @@ -3887,7 +3863,9 @@ static void __ttwu_queue_wakelist(struct task_struct = *p, int cpu, int wake_flags p->sched_remote_wakeup =3D !!(wake_flags & WF_MIGRATED); =20 WRITE_ONCE(rq->ttwu_pending, 1); +#ifdef CONFIG_SMP __smp_call_single_queue(cpu, &p->wake_entry.llist); +#endif } =20 void wake_up_if_idle(int cpu) @@ -3988,15 +3966,6 @@ static bool ttwu_queue_wakelist(struct task_struct *= p, int cpu, int wake_flags) return false; } =20 -#else /* !CONFIG_SMP: */ - -static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int= wake_flags) -{ - return false; -} - -#endif /* !CONFIG_SMP */ - static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) { struct rq *rq =3D cpu_rq(cpu); @@ -4529,10 +4498,8 @@ static void __sched_fork(unsigned long clone_flags, = struct task_struct *p) p->capture_control =3D NULL; #endif init_numa_balancing(clone_flags, p); -#ifdef CONFIG_SMP p->wake_entry.u_flags =3D CSD_TYPE_TTWU; 
p->migration_pending =3D NULL; -#endif init_sched_mm_cid(p); } =20 @@ -4783,14 +4750,11 @@ int sched_fork(unsigned long clone_flags, struct ta= sk_struct *p) if (likely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); #endif -#ifdef CONFIG_SMP p->on_cpu =3D 0; -#endif init_task_preempt_count(p); -#ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -#endif + return 0; } =20 @@ -4867,7 +4831,6 @@ void wake_up_new_task(struct task_struct *p) =20 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); WRITE_ONCE(p->__state, TASK_RUNNING); -#ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: * - cpus_ptr can change in the fork path @@ -4879,7 +4842,6 @@ void wake_up_new_task(struct task_struct *p) p->recent_used_cpu =3D task_cpu(p); rseq_migrate(p); __set_task_cpu(p, select_task_rq(p, task_cpu(p), &wake_flags)); -#endif rq =3D __task_rq_lock(p, &rf); update_rq_clock(rq); post_init_entity_util_avg(p); @@ -4990,7 +4952,6 @@ fire_sched_out_preempt_notifiers(struct task_struct *= curr, =20 static inline void prepare_task(struct task_struct *next) { -#ifdef CONFIG_SMP /* * Claim the task as running, we do this before switching to it * such that any running task will have this set. @@ -4999,12 +4960,10 @@ static inline void prepare_task(struct task_struct = *next) * its ordering comment. */ WRITE_ONCE(next->on_cpu, 1); -#endif } =20 static inline void finish_task(struct task_struct *prev) { -#ifdef CONFIG_SMP /* * This must be the very last reference to @prev from this CPU. After * p->on_cpu is cleared, the task can be moved to a different CPU. We @@ -5017,11 +4976,8 @@ static inline void finish_task(struct task_struct *p= rev) * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). */ smp_store_release(&prev->on_cpu, 0); -#endif } =20 -#ifdef CONFIG_SMP - static void do_balance_callbacks(struct rq *rq, struct balance_callback *h= ead) { void (*func)(struct rq *rq); @@ -5103,14 +5059,6 @@ void balance_callbacks(struct rq *rq, struct balance= _callback *head) } } =20 -#else /* !CONFIG_SMP: */ - -static inline void __balance_callbacks(struct rq *rq) -{ -} - -#endif /* !CONFIG_SMP */ - static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_fla= gs *rf) { @@ -5559,7 +5507,7 @@ unsigned long long task_sched_runtime(struct task_str= uct *p) struct rq *rq; u64 ns; =20 -#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) +#ifdef CONFIG_64BIT /* * 64-bit doesn't need locks to atomically read a 64-bit value. * So we have a optimization chance when the task's delta_exec is 0. @@ -5686,12 +5634,10 @@ void sched_tick(void) if (donor->flags & PF_WQ_WORKER) wq_worker_tick(donor); =20 -#ifdef CONFIG_SMP if (!scx_switched_all()) { rq->idle_balance =3D idle_cpu(cpu); sched_balance_trigger(rq); } -#endif } =20 #ifdef CONFIG_NO_HZ_FULL @@ -7815,12 +7761,10 @@ void show_state_filter(unsigned int state_filter) */ void __init init_idle(struct task_struct *idle, int cpu) { -#ifdef CONFIG_SMP struct affinity_context ac =3D (struct affinity_context) { .new_mask =3D cpumask_of(cpu), .flags =3D 0, }; -#endif struct rq *rq =3D cpu_rq(cpu); unsigned long flags; =20 @@ -7836,13 +7780,11 @@ void __init init_idle(struct task_struct *idle, int= cpu) idle->flags |=3D PF_KTHREAD | PF_NO_SETAFFINITY; kthread_set_per_cpu(idle, cpu); =20 -#ifdef CONFIG_SMP /* * No validation and serialization required at boot time and for * setting up the idle tasks of not yet online CPUs. 
*/ set_cpus_allowed_common(idle, &ac); -#endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the CPU isn't yet set to this CPU so the @@ -7861,9 +7803,7 @@ void __init init_idle(struct task_struct *idle, int c= pu) rq_set_donor(rq, idle); rcu_assign_pointer(rq->curr, idle); idle->on_rq =3D TASK_ON_RQ_QUEUED; -#ifdef CONFIG_SMP idle->on_cpu =3D 1; -#endif raw_spin_rq_unlock(rq); raw_spin_unlock_irqrestore(&idle->pi_lock, flags); =20 @@ -7876,13 +7816,9 @@ void __init init_idle(struct task_struct *idle, int = cpu) idle->sched_class =3D &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); -#ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); -#endif } =20 -#ifdef CONFIG_SMP - int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial) { @@ -8475,13 +8411,6 @@ static int __init migration_init(void) } early_initcall(migration_init); =20 -#else /* !CONFIG_SMP: */ -void __init sched_init_smp(void) -{ - sched_init_granularity(); -} -#endif /* !CONFIG_SMP */ - int in_sched_functions(unsigned long addr) { return in_lock_functions(addr) || @@ -8507,9 +8436,7 @@ void __init sched_init(void) int i; =20 /* Make sure the linker didn't screw up */ -#ifdef CONFIG_SMP BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class)); -#endif BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class)); BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class)); BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class)); @@ -8552,9 +8479,7 @@ void __init sched_init(void) #endif /* CONFIG_RT_GROUP_SCHED */ } =20 -#ifdef CONFIG_SMP init_defrootdomain(); -#endif =20 #ifdef CONFIG_RT_GROUP_SCHED init_rt_bandwidth(&root_task_group.rt_bandwidth, @@ -8615,7 +8540,6 @@ void __init sched_init(void) rq->rt.rt_runtime =3D global_rt_runtime(); init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); #endif -#ifdef CONFIG_SMP rq->sd =3D NULL; rq->rd =3D NULL; rq->cpu_capacity =3D SCHED_CAPACITY_SCALE; @@ -8632,16 +8556,15 @@ void __init sched_init(void) INIT_LIST_HEAD(&rq->cfs_tasks); =20 rq_attach_root(rq, &def_root_domain); -# ifdef CONFIG_NO_HZ_COMMON +#ifdef CONFIG_NO_HZ_COMMON rq->last_blocked_load_update_tick =3D jiffies; atomic_set(&rq->nohz_flags, 0); =20 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); -# endif -# ifdef CONFIG_HOTPLUG_CPU +#endif +#ifdef CONFIG_HOTPLUG_CPU rcuwait_init(&rq->hotplug_wait); -# endif -#endif /* CONFIG_SMP */ +#endif hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); fair_server_init(rq); @@ -8691,8 +8614,9 @@ void __init sched_init(void) =20 #ifdef CONFIG_SMP idle_thread_set_boot_cpu(); - balance_push_set(smp_processor_id(), false); #endif + + balance_push_set(smp_processor_id(), false); init_sched_fair_class(); init_sched_ext_class(); =20 diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index 0adeda93b5fb..249dc320a6a6 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -15,7 +15,6 @@ struct cpudl { struct cpudl_item *elements; }; =20 -#ifdef CONFIG_SMP int cpudl_find(struct cpudl *cp, struct task_struct *p, struct cpumask *l= ater_mask); void cpudl_set(struct cpudl *cp, int cpu, u64 dl); void cpudl_clear(struct cpudl *cp, int cpu); @@ -23,4 +22,3 @@ int cpudl_init(struct cpudl *cp); void cpudl_set_freecpu(struct cpudl *cp, int cpu); void cpudl_clear_freecpu(struct cpudl *cp, int cpu); void cpudl_cleanup(struct cpudl *cp); -#endif /* CONFIG_SMP */ diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h index 
f9c18e77d6a6..cd3bc0adb9b3 100644 --- a/kernel/sched/cpupri.h +++ b/kernel/sched/cpupri.h @@ -17,7 +17,6 @@ struct cpupri { int *cpu_to_pri; }; =20 -#ifdef CONFIG_SMP int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask); int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p, @@ -26,4 +25,3 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_s= truct *p, void cpupri_set(struct cpupri *cp, int cpu, int pri); int cpupri_init(struct cpupri *cp); void cpupri_cleanup(struct cpupri *cp); -#endif /* CONFIG_SMP */ diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 7df38ea4d650..80dd95419eac 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -111,7 +111,6 @@ static inline bool is_dl_boosted(struct sched_dl_entity= *dl_se) } #endif /* !CONFIG_RT_MUTEXES */ =20 -#ifdef CONFIG_SMP static inline struct dl_bw *dl_bw_of(int i) { RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), @@ -191,35 +190,6 @@ void __dl_update(struct dl_bw *dl_b, s64 bw) rq->dl.extra_bw +=3D bw; } } -#else /* !CONFIG_SMP: */ -static inline struct dl_bw *dl_bw_of(int i) -{ - return &cpu_rq(i)->dl.dl_bw; -} - -static inline int dl_bw_cpus(int i) -{ - return 1; -} - -static inline unsigned long dl_bw_capacity(int i) -{ - return SCHED_CAPACITY_SCALE; -} - -bool dl_bw_visited(int cpu, u64 cookie) -{ - return false; -} - -static inline -void __dl_update(struct dl_bw *dl_b, s64 bw) -{ - struct dl_rq *dl =3D container_of(dl_b, struct dl_rq, dl_bw); - - dl->extra_bw +=3D bw; -} -#endif /* !CONFIG_SMP */ =20 static inline void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus) @@ -552,23 +522,17 @@ void init_dl_rq(struct dl_rq *dl_rq) { dl_rq->root =3D RB_ROOT_CACHED; =20 -#ifdef CONFIG_SMP /* zero means no -deadline tasks */ dl_rq->earliest_dl.curr =3D dl_rq->earliest_dl.next =3D 0; =20 dl_rq->overloaded =3D 0; dl_rq->pushable_dl_tasks_root =3D RB_ROOT_CACHED; -#else - init_dl_bw(&dl_rq->dl_bw); -#endif =20 dl_rq->running_bw =3D 0; dl_rq->this_bw =3D 0; init_dl_rq_bw_ratio(dl_rq); } =20 -#ifdef CONFIG_SMP - static inline int dl_overloaded(struct rq *rq) { return atomic_read(&rq->rd->dlo_count); @@ -753,37 +717,6 @@ static struct rq *dl_task_offline_migration(struct rq = *rq, struct task_struct *p return later_rq; } =20 -#else /* !CONFIG_SMP: */ - -static inline -void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) -{ -} - -static inline -void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) -{ -} - -static inline -void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) -{ -} - -static inline -void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) -{ -} - -static inline void deadline_queue_push_tasks(struct rq *rq) -{ -} - -static inline void deadline_queue_pull_task(struct rq *rq) -{ -} -#endif /* !CONFIG_SMP */ - static void enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags); static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flag= s); @@ -1195,7 +1128,6 @@ static int start_dl_timer(struct sched_dl_entity *dl_= se) =20 static void __push_dl_task(struct rq *rq, struct rq_flags *rf) { -#ifdef CONFIG_SMP /* * Queueing this task back might have overloaded rq, check if we need * to kick someone away. 
@@ -1209,7 +1141,6 @@ static void __push_dl_task(struct rq *rq, struct rq_f= lags *rf) push_dl_task(rq); rq_repin_lock(rq, rf); } -#endif /* CONFIG_SMP */ } =20 /* a defer timer will not be reset if the runtime consumed was < dl_server= _min_res */ @@ -1339,7 +1270,6 @@ static enum hrtimer_restart dl_task_timer(struct hrti= mer *timer) goto unlock; } =20 -#ifdef CONFIG_SMP if (unlikely(!rq->online)) { /* * If the runqueue is no longer available, migrate the @@ -1356,7 +1286,6 @@ static enum hrtimer_restart dl_task_timer(struct hrti= mer *timer) * there. */ } -#endif /* CONFIG_SMP */ =20 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); if (dl_task(rq->donor)) @@ -1844,8 +1773,6 @@ static void init_dl_inactive_task_timer(struct sched_= dl_entity *dl_se) #define __node_2_dle(node) \ rb_entry((node), struct sched_dl_entity, rb_node) =20 -#ifdef CONFIG_SMP - static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) { struct rq *rq =3D rq_of_dl_rq(dl_rq); @@ -1881,13 +1808,6 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64= deadline) } } =20 -#else /* !CONFIG_SMP: */ - -static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} -static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} - -#endif /* !CONFIG_SMP */ - static inline void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { @@ -2214,8 +2134,6 @@ static void yield_task_dl(struct rq *rq) rq_clock_skip_update(rq); } =20 -#ifdef CONFIG_SMP - static inline bool dl_task_is_earliest_deadline(struct task_struct *p, struct rq *rq) { @@ -2345,7 +2263,6 @@ static int balance_dl(struct rq *rq, struct task_stru= ct *p, struct rq_flags *rf) =20 return sched_stop_runnable(rq) || sched_dl_runnable(rq); } -#endif /* CONFIG_SMP */ =20 /* * Only called when both the current and waking task are -deadline @@ -2359,7 +2276,6 @@ static void wakeup_preempt_dl(struct rq *rq, struct t= ask_struct *p, return; } =20 -#ifdef CONFIG_SMP /* * In the unlikely case current and p have the same deadline * let us try to decide what's the best thing to do... 
@@ -2367,7 +2283,6 @@ static void wakeup_preempt_dl(struct rq *rq, struct t= ask_struct *p, if ((p->dl.deadline =3D=3D rq->donor->dl.deadline) && !test_tsk_need_resched(rq->curr)) check_preempt_equal_dl(rq, p); -#endif /* CONFIG_SMP */ } =20 #ifdef CONFIG_SCHED_HRTICK @@ -2500,8 +2415,6 @@ static void task_fork_dl(struct task_struct *p) */ } =20 -#ifdef CONFIG_SMP - /* Only try algorithms three times */ #define DL_MAX_TRIES 3 =20 @@ -2995,8 +2908,6 @@ void dl_clear_root_domain_cpu(int cpu) dl_clear_root_domain(cpu_rq(cpu)->rd); } =20 -#endif /* CONFIG_SMP */ - static void switched_from_dl(struct rq *rq, struct task_struct *p) { /* @@ -3069,10 +2980,8 @@ static void switched_to_dl(struct rq *rq, struct tas= k_struct *p) } =20 if (rq->donor !=3D p) { -#ifdef CONFIG_SMP if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) deadline_queue_push_tasks(rq); -#endif if (dl_task(rq->donor)) wakeup_preempt_dl(rq, p, 0); else @@ -3149,7 +3058,6 @@ DEFINE_SCHED_CLASS(dl) =3D { .put_prev_task =3D put_prev_task_dl, .set_next_task =3D set_next_task_dl, =20 -#ifdef CONFIG_SMP .balance =3D balance_dl, .select_task_rq =3D select_task_rq_dl, .migrate_task_rq =3D migrate_task_rq_dl, @@ -3158,7 +3066,6 @@ DEFINE_SCHED_CLASS(dl) =3D { .rq_offline =3D rq_offline_dl, .task_woken =3D task_woken_dl, .find_lock_rq =3D find_lock_later_rq, -#endif /* CONFIG_SMP */ =20 .task_tick =3D task_tick_dl, .task_fork =3D task_fork_dl, @@ -3458,7 +3365,6 @@ bool dl_param_changed(struct task_struct *p, const st= ruct sched_attr *attr) return false; } =20 -#ifdef CONFIG_SMP int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial) { @@ -3570,7 +3476,6 @@ void dl_bw_free(int cpu, u64 dl_bw) { dl_bw_manage(dl_bw_req_free, cpu, dl_bw); } -#endif /* CONFIG_SMP */ =20 void print_dl_stats(struct seq_file *m, int cpu) { diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index aaba8661aa46..027750931420 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -283,7 +283,6 @@ static const struct file_operations sched_dynamic_fops = =3D { =20 __read_mostly bool sched_debug_verbose; =20 -#ifdef CONFIG_SMP static struct dentry *sd_dentry; =20 =20 @@ -311,9 +310,6 @@ static ssize_t sched_verbose_write(struct file *filp, c= onst char __user *ubuf, =20 return result; } -#else /* !CONFIG_SMP: */ -# define sched_verbose_write debugfs_write_file_bool -#endif /* !CONFIG_SMP */ =20 static const struct file_operations sched_verbose_fops =3D { .read =3D debugfs_read_file_bool, @@ -540,8 +536,6 @@ static __init int sched_init_debug(void) } late_initcall(sched_init_debug); =20 -#ifdef CONFIG_SMP - static cpumask_var_t sd_sysctl_cpus; =20 static int sd_flags_show(struct seq_file *m, void *v) @@ -652,8 +646,6 @@ void dirty_sched_domain_sysctl(int cpu) __cpumask_set_cpu(cpu, sd_sysctl_cpus); } =20 -#endif /* CONFIG_SMP */ - #ifdef CONFIG_FAIR_GROUP_SCHED static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task= _group *tg) { @@ -929,11 +921,7 @@ void print_dl_rq(struct seq_file *m, int cpu, struct d= l_rq *dl_rq) SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x)) =20 PU(dl_nr_running); -#ifdef CONFIG_SMP dl_bw =3D &cpu_rq(cpu)->rd->dl_bw; -#else - dl_bw =3D &dl_rq->dl_bw; -#endif SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw); SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw); =20 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 92ae3da8ca37..323e2f5c9361 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -88,7 +88,6 @@ static int __init 
setup_sched_thermal_decay_shift(char *s= tr) } __setup("sched_thermal_decay_shift=3D", setup_sched_thermal_decay_shift); =20 -#ifdef CONFIG_SMP /* * For asym packing, by default the lower numbered CPU has higher priority. */ @@ -111,7 +110,6 @@ int __weak arch_asym_cpu_priority(int cpu) * (default: ~5%) */ #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078) -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_CFS_BANDWIDTH /* @@ -996,7 +994,6 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *= cfs_rq) /************************************************************** * Scheduling class statistics methods: */ -#ifdef CONFIG_SMP int sched_update_scaling(void) { unsigned int factor =3D get_update_sysctl_factor(); @@ -1008,7 +1005,6 @@ int sched_update_scaling(void) =20 return 0; } -#endif /* CONFIG_SMP */ =20 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se); =20 @@ -1042,8 +1038,6 @@ static bool update_deadline(struct cfs_rq *cfs_rq, st= ruct sched_entity *se) =20 #include "pelt.h" =20 -#ifdef CONFIG_SMP - static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cp= u); static unsigned long task_h_load(struct task_struct *p); static unsigned long capacity_of(int cpu); @@ -1132,18 +1126,6 @@ void post_init_entity_util_avg(struct task_struct *p) sa->runnable_avg =3D sa->util_avg; } =20 -#else /* !CONFIG_SMP: */ -void init_entity_runnable_average(struct sched_entity *se) -{ -} -void post_init_entity_util_avg(struct task_struct *p) -{ -} -static void update_tg_load_avg(struct cfs_rq *cfs_rq) -{ -} -#endif /* !CONFIG_SMP */ - static s64 update_curr_se(struct rq *rq, struct sched_entity *curr) { u64 now =3D rq_clock_task(rq); @@ -3688,14 +3670,12 @@ static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_add(&cfs_rq->load, se->load.weight); -#ifdef CONFIG_SMP if (entity_is_task(se)) { struct rq *rq =3D rq_of(cfs_rq); =20 account_numa_enqueue(rq, task_of(se)); list_add(&se->group_node, &rq->cfs_tasks); } -#endif cfs_rq->nr_queued++; } =20 @@ -3703,12 +3683,10 @@ static void account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) { update_load_sub(&cfs_rq->load, se->load.weight); -#ifdef CONFIG_SMP if (entity_is_task(se)) { account_numa_dequeue(rq_of(cfs_rq), task_of(se)); list_del_init(&se->group_node); } -#endif cfs_rq->nr_queued--; } =20 @@ -3760,7 +3738,6 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct = sched_entity *se) *ptr -=3D min_t(typeof(*ptr), *ptr, _val); \ } while (0) =20 -#ifdef CONFIG_SMP static inline void enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { @@ -3777,12 +3754,6 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched= _entity *se) cfs_rq->avg.load_sum =3D max_t(u32, cfs_rq->avg.load_sum, cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); } -#else /* !CONFIG_SMP: */ -static inline void -enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } -static inline void -dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } -#endif /* !CONFIG_SMP */ =20 static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, i= nt flags); =20 @@ -3814,13 +3785,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, = struct sched_entity *se, =20 update_load_set(&se->load, weight); =20 -#ifdef CONFIG_SMP do { u32 divider =3D get_pelt_divider(&se->avg); =20 se->avg.load_avg =3D div_u64(se_weight(se) * se->avg.load_sum, divider); } while (0); -#endif =20 enqueue_load_avg(cfs_rq, se); if (se->on_rq) { @@ -3855,7 +3824,6 @@ static void 
reweight_task_fair(struct rq *rq, struct = task_struct *p, static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); =20 #ifdef CONFIG_FAIR_GROUP_SCHED -#ifdef CONFIG_SMP /* * All this does is approximate the hierarchical proportion which includes= that * global sum we all love to hate. @@ -3962,7 +3930,6 @@ static long calc_group_shares(struct cfs_rq *cfs_rq) */ return clamp_t(long, shares, MIN_SHARES, tg_shares); } -#endif /* CONFIG_SMP */ =20 /* * Recomputes the group entity based on the current state of its group @@ -3983,11 +3950,7 @@ static void update_cfs_group(struct sched_entity *se) if (throttled_hierarchy(gcfs_rq)) return; =20 -#ifndef CONFIG_SMP - shares =3D READ_ONCE(gcfs_rq->tg->shares); -#else shares =3D calc_group_shares(gcfs_rq); -#endif if (unlikely(se->load.weight !=3D shares)) reweight_entity(cfs_rq_of(se), se, shares); } @@ -4021,7 +3984,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *= cfs_rq, int flags) } } =20 -#ifdef CONFIG_SMP static inline bool load_avg_is_decayed(struct sched_avg *sa) { if (sa->load_sum) @@ -5136,48 +5098,6 @@ static inline void update_misfit_status(struct task_= struct *p, struct rq *rq) rq->misfit_task_load =3D max_t(unsigned long, task_h_load(p), 1); } =20 -#else /* !CONFIG_SMP: */ - -static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) -{ - return !cfs_rq->nr_queued; -} - -#define UPDATE_TG 0x0 -#define SKIP_AGE_LOAD 0x0 -#define DO_ATTACH 0x0 -#define DO_DETACH 0x0 - -static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_ent= ity *se, int not_used1) -{ - cfs_rq_util_change(cfs_rq, 0); -} - -static inline void remove_entity_load_avg(struct sched_entity *se) {} - -static inline void -attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} -static inline void -detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} - -static inline int sched_balance_newidle(struct rq *rq, struct rq_flags *rf) -{ - return 0; -} - -static inline void -util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} - -static inline void -util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} - -static inline void -util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, - bool task_sleep) {} -static inline void update_misfit_status(struct task_struct *p, struct rq *= rq) {} - -#endif /* !CONFIG_SMP */ - void __setparam_fair(struct task_struct *p, const struct sched_attr *attr) { struct sched_entity *se =3D &p->se; @@ -6080,7 +6000,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) resched_curr(rq); } =20 -#ifdef CONFIG_SMP static void __cfsb_csd_unthrottle(void *arg) { struct cfs_rq *cursor, *tmp; @@ -6139,12 +6058,6 @@ static inline void __unthrottle_cfs_rq_async(struct = cfs_rq *cfs_rq) if (first) smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd); } -#else /* !CONFIG_SMP: */ -static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) -{ - unthrottle_cfs_rq(cfs_rq); -} -#endif /* !CONFIG_SMP */ =20 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) { @@ -6600,7 +6513,6 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidt= h *cfs_b) * guaranteed at this point that no additional cfs_rq of this group can * join a CSD list. 
*/ -#ifdef CONFIG_SMP for_each_possible_cpu(i) { struct rq *rq =3D cpu_rq(i); unsigned long flags; @@ -6612,7 +6524,6 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidt= h *cfs_b) __cfsb_csd_unthrottle(rq); local_irq_restore(flags); } -#endif } =20 /* @@ -6825,7 +6736,6 @@ static inline void hrtick_update(struct rq *rq) } #endif /* !CONFIG_SCHED_HRTICK */ =20 -#ifdef CONFIG_SMP static inline bool cpu_overutilized(int cpu) { unsigned long rq_util_min, rq_util_max; @@ -6867,9 +6777,6 @@ static inline void check_update_overutilized_status(s= truct rq *rq) if (!is_rd_overutilized(rq->rd) && cpu_overutilized(rq->cpu)) set_rd_overutilized(rq->rd, 1); } -#else /* !CONFIG_SMP: */ -static inline void check_update_overutilized_status(struct rq *rq) { } -#endif /* !CONFIG_SMP */ =20 /* Runqueue only has SCHED_IDLE tasks enqueued */ static int sched_idle_rq(struct rq *rq) @@ -6878,12 +6785,10 @@ static int sched_idle_rq(struct rq *rq) rq->nr_running); } =20 -#ifdef CONFIG_SMP static int sched_idle_cpu(int cpu) { return sched_idle_rq(cpu_rq(cpu)); } -#endif =20 static void requeue_delayed_entity(struct sched_entity *se) @@ -7198,8 +7103,6 @@ static inline unsigned int cfs_h_nr_delayed(struct rq= *rq) return (rq->cfs.h_nr_queued - rq->cfs.h_nr_runnable); } =20 -#ifdef CONFIG_SMP - /* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */ static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask); @@ -8735,9 +8638,6 @@ balance_fair(struct rq *rq, struct task_struct *prev,= struct rq_flags *rf) =20 return sched_balance_newidle(rq, rf) !=3D 0; } -#else /* !CONFIG_SMP: */ -static inline void set_task_max_allowed_capacity(struct task_struct *p) {} -#endif /* !CONFIG_SMP */ =20 static void set_next_buddy(struct sched_entity *se) { @@ -9047,7 +8947,6 @@ static bool yield_to_task_fair(struct rq *rq, struct = task_struct *p) return true; } =20 -#ifdef CONFIG_SMP /************************************************** * Fair scheduling class load-balancing methods. * @@ -12970,8 +12869,6 @@ static void rq_offline_fair(struct rq *rq) clear_tg_offline_cfs_rqs(rq); } =20 -#endif /* CONFIG_SMP */ - #ifdef CONFIG_SCHED_CORE static inline bool __entity_slice_used(struct sched_entity *se, int min_nr_tasks) @@ -13199,7 +13096,6 @@ static void detach_entity_cfs_rq(struct sched_entit= y *se) { struct cfs_rq *cfs_rq =3D cfs_rq_of(se); =20 -#ifdef CONFIG_SMP /* * In case the task sched_avg hasn't been attached: * - A forked task which hasn't been woken up by wake_up_new_task(). 
@@ -13208,7 +13104,6 @@ static void detach_entity_cfs_rq(struct sched_entit= y *se) */ if (!se->avg.last_update_time) return; -#endif =20 /* Catch up with the cfs_rq and remove our load when we leave */ update_load_avg(cfs_rq, se, 0); @@ -13272,7 +13167,6 @@ static void __set_next_task_fair(struct rq *rq, str= uct task_struct *p, bool firs { struct sched_entity *se =3D &p->se; =20 -#ifdef CONFIG_SMP if (task_on_rq_queued(p)) { /* * Move the next running task to the front of the list, so our @@ -13280,7 +13174,6 @@ static void __set_next_task_fair(struct rq *rq, str= uct task_struct *p, bool firs */ list_move(&se->group_node, &rq->cfs_tasks); } -#endif if (!first) return; =20 @@ -13318,9 +13211,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) { cfs_rq->tasks_timeline =3D RB_ROOT_CACHED; cfs_rq->min_vruntime =3D (u64)(-(1LL << 20)); -#ifdef CONFIG_SMP raw_spin_lock_init(&cfs_rq->removed.lock); -#endif } =20 #ifdef CONFIG_FAIR_GROUP_SCHED @@ -13335,10 +13226,8 @@ static void task_change_group_fair(struct task_str= uct *p) =20 detach_task_cfs_rq(p); =20 -#ifdef CONFIG_SMP /* Tell se's cfs_rq has been changed -- migrated */ p->se.avg.last_update_time =3D 0; -#endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); } @@ -13634,7 +13523,6 @@ DEFINE_SCHED_CLASS(fair) =3D { .put_prev_task =3D put_prev_task_fair, .set_next_task =3D set_next_task_fair, =20 -#ifdef CONFIG_SMP .balance =3D balance_fair, .select_task_rq =3D select_task_rq_fair, .migrate_task_rq =3D migrate_task_rq_fair, @@ -13644,7 +13532,6 @@ DEFINE_SCHED_CLASS(fair) =3D { =20 .task_dead =3D task_dead_fair, .set_cpus_allowed =3D set_cpus_allowed_fair, -#endif =20 .task_tick =3D task_tick_fair, .task_fork =3D task_fork_fair, @@ -13707,7 +13594,6 @@ void show_numa_stats(struct task_struct *p, struct = seq_file *m) =20 __init void init_sched_fair_class(void) { -#ifdef CONFIG_SMP int i; =20 for_each_possible_cpu(i) { @@ -13729,5 +13615,4 @@ __init void init_sched_fair_class(void) nohz.next_blocked =3D jiffies; zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); #endif -#endif /* CONFIG_SMP */ } diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 2635ec239bff..036f09de16ee 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_SMP #include "sched-pelt.h" =20 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); @@ -181,55 +180,3 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs= _rq) return rq_clock_pelt(rq_of(cfs_rq)); } #endif /* !CONFIG_CFS_BANDWIDTH */ - -#else /* !CONFIG_SMP: */ - -static inline int -update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) -{ - return 0; -} - -static inline int -update_rt_rq_load_avg(u64 now, struct rq *rq, int running) -{ - return 0; -} - -static inline int -update_dl_rq_load_avg(u64 now, struct rq *rq, int running) -{ - return 0; -} - -static inline int -update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) -{ - return 0; -} - -static inline u64 hw_load_avg(struct rq *rq) -{ - return 0; -} - -static inline int -update_irq_load_avg(struct rq *rq, u64 running) -{ - return 0; -} - -static inline u64 rq_clock_pelt(struct rq *rq) -{ - return rq_clock_task(rq); -} - -static inline void -update_rq_clock_pelt(struct rq *rq, s64 delta) { } - -static inline void -update_idle_rq_clock_pelt(struct rq *rq) { } - -static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { } - -#endif /* !CONFIG_SMP */ diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 85768c81531c..0cd801458eef 100644 --- a/kernel/sched/rt.c +++ 
b/kernel/sched/rt.c @@ -2451,7 +2451,11 @@ void __init init_sched_rt_class(void) GFP_KERNEL, cpu_to_node(i)); } } -#endif /* CONFIG_SMP */ +#else /* !CONFIG_SMP: */ +void __init init_sched_rt_class(void) +{ +} +#endif /* !CONFIG_SMP */ =20 /* * When switching a task to RT, we may overload the runqueue diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index aec0c8253c1c..bb70bcc68e2b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -442,14 +442,12 @@ struct task_group { /* runqueue "owned" by this group on each CPU */ struct cfs_rq **cfs_rq; unsigned long shares; -#ifdef CONFIG_SMP /* * load_avg can be heavily contended at clock tick time, so put * it in its own cache-line separated from the fields above which * will also be accessed at each tick. */ atomic_long_t load_avg ____cacheline_aligned; -#endif /* CONFIG_SMP */ #endif /* CONFIG_FAIR_GROUP_SCHED */ =20 #ifdef CONFIG_RT_GROUP_SCHED @@ -573,13 +571,8 @@ extern int sched_group_set_shares(struct task_group *t= g, unsigned long shares); =20 extern int sched_group_set_idle(struct task_group *tg, long idle); =20 -#ifdef CONFIG_SMP extern void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next); -#else /* !CONFIG_SMP: */ -static inline void set_task_rq_fair(struct sched_entity *se, - struct cfs_rq *prev, struct cfs_rq *next) { } -#endif /* !CONFIG_SMP */ #else /* !CONFIG_FAIR_GROUP_SCHED: */ static inline int sched_group_set_shares(struct task_group *tg, unsigned l= ong shares) { return 0; } static inline int sched_group_set_idle(struct task_group *tg, long idle) {= return 0; } @@ -667,7 +660,6 @@ struct cfs_rq { struct sched_entity *curr; struct sched_entity *next; =20 -#ifdef CONFIG_SMP /* * CFS load tracking */ @@ -699,7 +691,6 @@ struct cfs_rq { u64 last_h_load_update; struct sched_entity *h_load_next; #endif /* CONFIG_FAIR_GROUP_SCHED */ -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_FAIR_GROUP_SCHED struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ @@ -796,14 +787,10 @@ struct rt_rq { struct rt_prio_array active; unsigned int rt_nr_running; unsigned int rr_nr_running; -#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED struct { int curr; /* highest queued rt task prio */ -#ifdef CONFIG_SMP int next; /* next highest */ -#endif } highest_prio; -#endif #ifdef CONFIG_SMP bool overloaded; struct plist_head pushable_tasks; @@ -839,7 +826,6 @@ struct dl_rq { =20 unsigned int dl_nr_running; =20 -#ifdef CONFIG_SMP /* * Deadline values of the currently executing and the * earliest ready task on this rq. Caching these facilitates @@ -859,9 +845,7 @@ struct dl_rq { * of the leftmost (earliest deadline) element. */ struct rb_root_cached pushable_dl_tasks_root; -#else /* !CONFIG_SMP: */ - struct dl_bw dl_bw; -#endif /* !CONFIG_SMP */ + /* * "Active utilization" for this runqueue: increased when a * task wakes up (becomes TASK_RUNNING) and decreased when a @@ -932,7 +916,6 @@ static inline long se_runnable(struct sched_entity *se) =20 #endif /* !CONFIG_FAIR_GROUP_SCHED */ =20 -#ifdef CONFIG_SMP /* * XXX we want to get rid of these helpers and use the full load resolutio= n. 
*/ @@ -1043,7 +1026,6 @@ static inline void set_rd_overloaded(struct root_doma= in *rd, int status) #ifdef HAVE_RT_PUSH_IPI extern void rto_push_irq_work_func(struct irq_work *work); #endif -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_UCLAMP_TASK /* @@ -1107,18 +1089,14 @@ struct rq { unsigned int numa_migrate_on; #endif #ifdef CONFIG_NO_HZ_COMMON -#ifdef CONFIG_SMP unsigned long last_blocked_load_update_tick; unsigned int has_blocked_load; call_single_data_t nohz_csd; -#endif /* CONFIG_SMP */ unsigned int nohz_tick_stopped; atomic_t nohz_flags; #endif /* CONFIG_NO_HZ_COMMON */ =20 -#ifdef CONFIG_SMP unsigned int ttwu_pending; -#endif u64 nr_switches; =20 #ifdef CONFIG_UCLAMP_TASK @@ -1183,7 +1161,6 @@ struct rq { int membarrier_state; #endif =20 -#ifdef CONFIG_SMP struct root_domain *rd; struct sched_domain __rcu *sd; =20 @@ -1224,7 +1201,6 @@ struct rq { #ifdef CONFIG_HOTPLUG_CPU struct rcuwait hotplug_wait; #endif -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_IRQ_TIME_ACCOUNTING u64 prev_irq_time; @@ -1271,9 +1247,7 @@ struct rq { struct cpuidle_state *idle_state; #endif =20 -#ifdef CONFIG_SMP unsigned int nr_pinned; -#endif unsigned int push_busy; struct cpu_stop_work push_work; =20 @@ -1299,7 +1273,7 @@ struct rq { /* Scratch cpumask to be temporarily used under rq_lock */ cpumask_var_t scratch_mask; =20 -#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP) +#ifdef CONFIG_CFS_BANDWIDTH call_single_data_t cfsb_csd; struct list_head cfsb_csd_list; #endif @@ -1962,8 +1936,6 @@ init_numa_balancing(unsigned long clone_flags, struct= task_struct *p) =20 #endif /* !CONFIG_NUMA_BALANCING */ =20 -#ifdef CONFIG_SMP - static inline void queue_balance_callback(struct rq *rq, struct balance_callback *head, @@ -2129,8 +2101,6 @@ static inline const struct cpumask *task_user_cpus(st= ruct task_struct *p) return p->user_cpus_ptr; } =20 -#endif /* CONFIG_SMP */ - #ifdef CONFIG_CGROUP_SCHED =20 /* @@ -2417,7 +2387,6 @@ struct sched_class { void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_s= truct *next); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); =20 -#ifdef CONFIG_SMP int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); =20 void (*migrate_task_rq)(struct task_struct *p, int new_cpu); @@ -2430,7 +2399,6 @@ struct sched_class { void (*rq_offline)(struct rq *rq); =20 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); -#endif /* CONFIG_SMP */ =20 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued); void (*task_fork)(struct task_struct *p); @@ -2582,8 +2550,6 @@ extern struct task_struct *pick_task_idle(struct rq *= rq); #define SCA_MIGRATE_ENABLE 0x04 #define SCA_USER 0x08 =20 -#ifdef CONFIG_SMP - extern void update_group_capacity(struct sched_domain *sd, int cpu); =20 extern void sched_balance_trigger(struct rq *rq); @@ -2635,26 +2601,6 @@ static inline struct task_struct *get_push_task(stru= ct rq *rq) =20 extern int push_cpu_stop(void *arg); =20 -#else /* !CONFIG_SMP: */ - -static inline bool task_allowed_on_cpu(struct task_struct *p, int cpu) -{ - return true; -} - -static inline int __set_cpus_allowed_ptr(struct task_struct *p, - struct affinity_context *ctx) -{ - return set_cpus_allowed_ptr(p, ctx->new_mask); -} - -static inline cpumask_t *alloc_user_cpus_ptr(int node) -{ - return NULL; -} - -#endif /* !CONFIG_SMP */ - #ifdef CONFIG_CPU_IDLE =20 static inline void idle_set_state(struct rq *rq, @@ -2931,8 +2877,6 @@ static inline class_##name##_t class_##name##_constru= ctor(type *lock, type *lock { 
class_##name##_t _t =3D { .lock =3D lock, .lock2 =3D lock2 }, *_T =3D &_= t; \ _lock; return _t; } =20 -#ifdef CONFIG_SMP - static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) { #ifdef CONFIG_SCHED_CORE @@ -3092,42 +3036,6 @@ extern void set_rq_offline(struct rq *rq); =20 extern bool sched_smp_initialized; =20 -#else /* !CONFIG_SMP: */ - -/* - * double_rq_lock - safely lock two runqueues - * - * Note this does not disable interrupts like task_rq_lock, - * you need to do so manually before calling. - */ -static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) - __acquires(rq1->lock) - __acquires(rq2->lock) -{ - WARN_ON_ONCE(!irqs_disabled()); - WARN_ON_ONCE(rq1 !=3D rq2); - raw_spin_rq_lock(rq1); - __acquire(rq2->lock); /* Fake it out ;) */ - double_rq_clock_clear_update(rq1, rq2); -} - -/* - * double_rq_unlock - safely unlock two runqueues - * - * Note this does not restore interrupts like task_rq_unlock, - * you need to do so manually after calling. - */ -static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) - __releases(rq1->lock) - __releases(rq2->lock) -{ - WARN_ON_ONCE(rq1 !=3D rq2); - raw_spin_rq_unlock(rq1); - __release(rq2->lock); -} - -#endif /* !CONFIG_SMP */ - DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq, double_rq_lock(_T->lock, _T->lock2), double_rq_unlock(_T->lock, _T->lock2)) @@ -3186,7 +3094,7 @@ extern void nohz_balance_exit_idle(struct rq *rq); static inline void nohz_balance_exit_idle(struct rq *rq) { } #endif /* !CONFIG_NO_HZ_COMMON */ =20 -#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) +#ifdef CONFIG_NO_HZ_COMMON extern void nohz_run_idle_balance(int cpu); #else static inline void nohz_run_idle_balance(int cpu) { } @@ -3312,8 +3220,6 @@ static inline void cpufreq_update_util(struct rq *rq,= unsigned int flags) { } # define arch_scale_freq_invariant() false #endif =20 -#ifdef CONFIG_SMP - unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, unsigned long *min, unsigned long *max); @@ -3357,10 +3263,6 @@ static inline unsigned long cpu_util_rt(struct rq *r= q) return READ_ONCE(rq->avg_rt.util_avg); } =20 -#else /* !CONFIG_SMP: */ -static inline bool update_other_load_avgs(struct rq *rq) { return false; } -#endif /* !CONFIG_SMP */ - #ifdef CONFIG_UCLAMP_TASK =20 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp= _id); @@ -3581,7 +3483,6 @@ static inline void membarrier_switch_mm(struct rq *rq, =20 #endif /* !CONFIG_MEMBARRIER */ =20 -#ifdef CONFIG_SMP static inline bool is_per_cpu_kthread(struct task_struct *p) { if (!(p->flags & PF_KTHREAD)) @@ -3592,7 +3493,6 @@ static inline bool is_per_cpu_kthread(struct task_str= uct *p) =20 return true; } -#endif /* CONFIG_SMP */ =20 extern void swake_up_all_locked(struct swait_queue_head *q); extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_qu= eue *wait); @@ -3891,7 +3791,6 @@ static inline void init_sched_mm_cid(struct task_stru= ct *t) { } =20 extern u64 avg_vruntime(struct cfs_rq *cfs_rq); extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); -#ifdef CONFIG_SMP static inline void move_queued_task_locked(struct rq *src_rq, struct rq *dst_rq, struct = task_struct *task) { @@ -3912,7 +3811,6 @@ bool task_is_pushable(struct rq *rq, struct task_stru= ct *p, int cpu) =20 return false; } -#endif /* CONFIG_SMP */ =20 #ifdef CONFIG_RT_MUTEXES =20 @@ -3953,21 +3851,8 @@ extern void check_class_changed(struct rq *rq, struc= t task_struct *p, const struct sched_class *prev_class, int oldprio); =20 -#ifdef 
CONFIG_SMP extern struct balance_callback *splice_balance_callbacks(struct rq *rq); extern void balance_callbacks(struct rq *rq, struct balance_callback *head= ); -#else /* !CONFIG_SMP: */ - -static inline struct balance_callback *splice_balance_callbacks(struct rq = *rq) -{ - return NULL; -} - -static inline void balance_callbacks(struct rq *rq, struct balance_callbac= k *head) -{ -} - -#endif /* !CONFIG_SMP */ =20 #ifdef CONFIG_SCHED_CLASS_EXT /* diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index 5cb5e9487f0d..d7fccf871c7d 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -1119,7 +1119,6 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sch= ed_attr __user *, uattr, return copy_struct_to_user(uattr, usize, &kattr, sizeof(kattr), NULL); } =20 -#ifdef CONFIG_SMP int dl_task_check_affinity(struct task_struct *p, const struct cpumask *ma= sk) { /* @@ -1148,7 +1147,6 @@ int dl_task_check_affinity(struct task_struct *p, con= st struct cpumask *mask) =20 return 0; } -#endif /* CONFIG_SMP */ =20 int __sched_setaffinity(struct task_struct *p, struct affinity_context *ct= x) { diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 7ce0bac86e94..72a9a8e17986 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -15,8 +15,6 @@ void sched_domains_mutex_unlock(void) mutex_unlock(&sched_domains_mutex); } =20 -#ifdef CONFIG_SMP - /* Protected by sched_domains_mutex: */ static cpumask_var_t sched_domains_tmpmask; static cpumask_var_t sched_domains_tmpmask2; @@ -1335,11 +1333,10 @@ static void init_sched_groups_capacity(int cpu, str= uct sched_domain *sd) update_group_capacity(sd, cpu); } =20 -#ifdef CONFIG_SMP - /* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes. */ void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio) { +#ifdef CONFIG_SMP int asym_prefer_cpu =3D cpu; struct sched_domain *sd; =20 @@ -1389,9 +1386,8 @@ void sched_update_asym_prefer_cpu(int cpu, int old_pr= io, int new_prio) =20 WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu); } -} - #endif /* CONFIG_SMP */ +} =20 /* * Set of available CPUs grouped by their corresponding capacities @@ -2857,5 +2853,3 @@ void partition_sched_domains(int ndoms_new, cpumask_v= ar_t doms_new[], partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); sched_domains_mutex_unlock(); } - -#endif /* CONFIG_SMP */ --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 2CCAC2517B9 for ; Wed, 28 May 2025 08:11:02 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419863; cv=none; b=YsaNMKsB4R0q+84huWREE95tLWSKhJfgebAv5si8Oz549OFIAuhWSKz/1xw8j2RlplVSnLu+i7J/DcG9JpU8ZH3DT3jKDG8/dcm9Z/eHKYb37bobhCzb19ZZ1vcrp1JMzsrWX1KO/AcG+4BUB+0ODiU9f6eRPxDb9FAmSPl7imI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419863; c=relaxed/simple; bh=sLanDBJBBAyjRd/ro3K0Wy5iwo185WPogzHo4PmuQvU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=mkMHhdTakF98e/8l2MMNXqppiAhf0hqL43LzkM9m3yM+kfAmrpzoZ0LMEkwHylNIKDa2YcovTj0FkNi5/HDSmcCpsldzNz7pCTsrMEeioa8EsRxmLFCiQDayPRf3i1nOsMTCnEgHXxet+hSOaCXEMkpFN1zFbSRjMBI8uosuQ/s= ARC-Authentication-Results: i=1; 
smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KOcw5wZd; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KOcw5wZd" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9ECDAC4CEEF; Wed, 28 May 2025 08:10:59 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419862; bh=sLanDBJBBAyjRd/ro3K0Wy5iwo185WPogzHo4PmuQvU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=KOcw5wZd1qvaBBCmahA419fAsoH3HbPZsm/S2pce0Dcgs0Ea2zkd7vm38P/EH+sJH XieOiky4i+ADwknEMqy7w5Y151KvSNxue6IwTi6L1+TDNGWF6D/HuWjLOF/80rKIqR PVV4ZvF8E0YewJGGqz4ltOyeZRTDqgvu6nArHyl0fnr6j05z0FYagkNU9PrBovYr1A 3iM/DAhI5qwOKj9tUS3MBj/qz2o5WOiSSUR3wqRB3rtRtk+MLSeSMQlW/OIln4uIYY IdBZT7tQagoouK4k1tXMGsBUhmzf6k3uC8/ZufhSWNsj20hFw/0/k8wMd4ON1cq2O/ Jgo7tg9arKygg== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 21/43] sched/smp: Always define is_percpu_thread() and scheduler_ipi() Date: Wed, 28 May 2025 10:09:02 +0200 Message-ID: <20250528080924.2273858-22-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- include/linux/sched.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index f60aaa7fc4ad..8d3167059675 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1756,12 +1756,8 @@ extern struct pid *cad_pid; =20 static __always_inline bool is_percpu_thread(void) { -#ifdef CONFIG_SMP return (current->flags & PF_NO_SETAFFINITY) && (current->nr_cpus_allowed =3D=3D 1); -#else - return true; -#endif } =20 /* Per-process atomic flags. 
*/ @@ -1958,7 +1954,6 @@ extern void __set_task_comm(struct task_struct *tsk, = const char *from, bool exec buf; \ }) =20 -#ifdef CONFIG_SMP static __always_inline void scheduler_ipi(void) { /* @@ -1968,9 +1963,6 @@ static __always_inline void scheduler_ipi(void) */ preempt_fold_need_resched(); } -#else -static inline void scheduler_ipi(void) { } -#endif =20 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int= match_state); =20 --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 56A602690D6 for ; Wed, 28 May 2025 08:11:06 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419866; cv=none; b=su8oT//udXciO1iF2RN6JoFGFV9JeT42+SkAVJHM58pRmTLbd8Krb8jPDmcD2aIPOQQgOvdiL0MZ9OdA8BStTq4nDPktC/QWMF5/TTgvbc4csBz3yub+raWj7GeIbLq/a/X1X0LCgodWO9AsFQlgv5gl8HtHj4gS937Yrc2ZR+k= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419866; c=relaxed/simple; bh=j4zcNcSlSsYbTd3vaYS/4fsyrq1dVdZgtgW88vkwa6U=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=uJPhkgsKw4LV/KFe/E68cgEXMioDuMV32dGSKHvIYW4B1rdmEpU40TNh/AcSh/RLWUcNr/uK74owIqn3pOW+3CdvU7UaDbJdqbxT1UsbFN4tJ22YJ7LwwVnk9L1ydf2hAbO7zwfnOfcdKNh7v8JE+QoOJwnfOOkYxln3U8AZkjY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=OAbtIWv8; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="OAbtIWv8" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2E05BC4CEF0; Wed, 28 May 2025 08:11:02 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419866; bh=j4zcNcSlSsYbTd3vaYS/4fsyrq1dVdZgtgW88vkwa6U=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=OAbtIWv8g+FA7UDxonQXuvBhhhr/yPReoX3FKiQARyX920jUbqx/T+NBo+/uttP85 sG0sCX0zHHxGOstxWzqeaw0faYqzWChET069iKUkYgIfZSnINNgpq7EonvAVewt2kX bRhx0wZHIngRCAo1uFA6ZLw4FrPhiY7Vr8jopg9kWK9IKmt168JuJAVZVGfvdzkrQt 7Tfv4Wnattjh5jQGljmmpJDoSungm4JEeG56H0LmmULL2iygc1IPiWHmJZGizZC27I kUD0D4Q8HVXj66CcJB9aOs7m+gnkma8OWNOsPEAtAEPaJd6n3ZMjiR67isH2rL8Oia ViToytKFNJAUA== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 22/43] sched/smp: Always define rq->hrtick_csd Date: Wed, 28 May 2025 10:09:03 +0200 Message-ID: <20250528080924.2273858-23-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. 
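For context, the hrtick timer must be armed on the CPU that owns the runqueue, which is what makes the CSD necessary in the first place. A simplified sketch of that remote-start pattern (condensed from the hrtick code this patch touches, not a verbatim quote):

	static void hrtick_start_sketch(struct rq *rq, u64 delay)
	{
		/* (expiry-time computation elided in this sketch) */
		if (rq == this_rq())
			__hrtick_restart(rq);	/* arm rq->hrtick_timer locally */
		else	/* ask the owning CPU to arm it, via the per-rq CSD */
			smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
	}

Making the SMP variant unconditional means rq->hrtick_csd must always exist and always be initialized, which is why hrtick_rq_init() below loses its #ifdef as well.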
Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/core.c | 23 ----------------------- kernel/sched/sched.h | 2 -- 2 files changed, 25 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8e5f728df3e0..73f035c21fbe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -851,8 +851,6 @@ static enum hrtimer_restart hrtick(struct hrtimer *time= r) return HRTIMER_NORESTART; } =20 -#ifdef CONFIG_SMP - static void __hrtick_restart(struct rq *rq) { struct hrtimer *timer =3D &rq->hrtick_timer; @@ -897,30 +895,9 @@ void hrtick_start(struct rq *rq, u64 delay) smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); } =20 -#else /* !CONFIG_SMP: */ -/* - * Called to set the hrtick timer state. - * - * called with rq->lock held and IRQs disabled - */ -void hrtick_start(struct rq *rq, u64 delay) -{ - /* - * Don't schedule slices shorter than 10000ns, that just - * doesn't make sense. Rely on vruntime for fairness. - */ - delay =3D max_t(u64, delay, 10000LL); - hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), - HRTIMER_MODE_REL_PINNED_HARD); -} - -#endif /* !CONFIG_SMP */ - static void hrtick_rq_init(struct rq *rq) { -#ifdef CONFIG_SMP INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); -#endif hrtimer_setup(&rq->hrtick_timer, hrtick, CLOCK_MONOTONIC, HRTIMER_MODE_RE= L_HARD); } #else /* !CONFIG_SCHED_HRTICK: */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index bb70bcc68e2b..de130ffa42b2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1218,9 +1218,7 @@ struct rq { long calc_load_active; =20 #ifdef CONFIG_SCHED_HRTICK -#ifdef CONFIG_SMP call_single_data_t hrtick_csd; -#endif struct hrtimer hrtick_timer; ktime_t hrtick_time; #endif --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 46593269B1C for ; Wed, 28 May 2025 08:11:09 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419872; cv=none; b=pLCt2fa/hjIk1hqa/y1cWaw2AwU+S2ccsE2yf3vNki9fI9MIJuVM8sMYcRelP3eXmZ4BdlLmxIve7tt3H9GRcFQqEbKfDmbOLldysMscsMnUAPvIHhSBOvP3KxwWsQPHGV3+Vq4HPI09zcjeRA8LNCTflHD3Y4tvXyFL8gEdW+s= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419872; c=relaxed/simple; bh=4C0sfQFfvCrKmZOsZyByxBgJQJjyZD52JsOF/htWank=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=SMHvT+Kh3pY+z8zp+SQ8AV9ocTdN572jpVXKBi1Xk1zfhWIl/uJ/8PDe0OxPm4FVd2Y2FacWPGFSyl2oi523qCguDy+MSrgYNk+GpcM63aWEttzj7OgzzMRNF56wnpRGXVzp1BwUL8emY3PjBt1t6LvGrHmk9cvktsD7con8UVU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=dOZv0fqw; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="dOZv0fqw" Received: by smtp.kernel.org (Postfix) with ESMTPSA id B1DF2C4CEEB; Wed, 28 May 2025 08:11:06 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419869; 
bh=4C0sfQFfvCrKmZOsZyByxBgJQJjyZD52JsOF/htWank=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=dOZv0fqwkTysHecrEUoZhFcX+wdpimeGs+siUeGJMJl1e3LDoer1N/PQFB8zJfuqy raIx3+HsvnqSL6UsbsrSG0ck++NjWUNeB4mivg3z/2PbHnKjw03XKWHBcrp1ghtxsn bu8rIDH31iFgbRpm+wZmSKU5UUqxkp8ak6ViCMSXuc+N8Bx7ycGqSxwlwXD1cK/sP4 laxdT7xScEzYTIe86vhAW7UWr/UfSQdGRkMpsneMN/zTWsC58j7ZwhkS99L7/MZyMv rcvoEF+ftfPo2WZjJeaeQU/aYS2rk6xbCiM2Y9XM9xw3muNVAeB20Dj10etvDP8p6S CquwNUdYgd8Jg== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 23/43] sched/smp: Use the SMP version of try_to_wake_up() Date: Wed, 28 May 2025 10:09:04 +0200 Message-ID: <20250528080924.2273858-24-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/core.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 73f035c21fbe..850f39fe5b8f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4198,7 +4198,6 @@ int try_to_wake_up(struct task_struct *p, unsigned in= t state, int wake_flags) if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) break; =20 -#ifdef CONFIG_SMP /* * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be * possible to, falsely, observe p->on_cpu =3D=3D 0. 
@@ -4277,9 +4276,6 @@ int try_to_wake_up(struct task_struct *p, unsigned in= t state, int wake_flags) psi_ttwu_dequeue(p); set_task_cpu(p, cpu); } -#else /* !CONFIG_SMP: */ - cpu =3D task_cpu(p); -#endif /* !CONFIG_SMP */ =20 ttwu_queue(p, cpu, wake_flags); } --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C790A26A087 for ; Wed, 28 May 2025 08:11:13 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419873; cv=none; b=i9i5j2pF+PfVur2hNBl97uUipAwP7GmGZQd3LmSZsDCpuHh0v0rFTU6oPC8W2HyJoZuyE2bz4Uip/CSCAqNbPnxdeTZffA8MD3HQ3B/GeEH2lmhhTunsDNefDOxTcARQL3oFkKb1bJ1QwT5HOH7aCLKoIaUGD/VxPP46j+7auYA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419873; c=relaxed/simple; bh=Miv7nCGwvNTejjIOfYKmadg9kS3OXpi2GOuOWy9Ycc8=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=VamRZYqDqif44E3pEcnIF7wVD8SkisUFKR5NB+iIJOQV7lhl4JIl3WRSybt9CGgAS/KyoijQ75y+UBv/oppWDpglIpjpU2oa8IUZLBTivHqx56O4VFgMOceJ9j7JmtBlR/ZIIWrmZXwtoze+DLzkXgbuc5e7KKtJI3rj/vvr620= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=fAGspQnp; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="fAGspQnp" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 41E53C4CEE7; Wed, 28 May 2025 08:11:10 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419873; bh=Miv7nCGwvNTejjIOfYKmadg9kS3OXpi2GOuOWy9Ycc8=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=fAGspQnplhd7jSQyrvyLcekSXjStDCnmfp1FrblhUhIoxdeCms0KraRIpkcuPHGKC wKF6twwr9us1JVbFWmS1gfz7udwb+GY8p4blrhXbK8plFps9/olr2pJnRERMQcyUOD NI7Cmm+DuV3i93ONKZ7ZLMYLe/1hI3ISiX5cEKj7NvVnC8Kr1AGIPyeG3+0lWLqGPv nCqZkzNtx9wmagoGEfPIojQY7vmMQ2JIssSgTzxxHc9WKRFPbsRGct1XsDD70I/chC SAD0Cbsbs2kzGhs0FnpnV+p8aaaK68YJzjON+GNr7pmb4ao/5zrhWiFYgGqvTGduui OEKb1/TrUQkog== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 24/43] sched/smp: Use the SMP version of __task_needs_rq_lock() Date: Wed, 28 May 2025 10:09:05 +0200 Message-ID: <20250528080924.2273858-25-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. 
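As background: smp_cond_load_acquire(&p->on_cpu, !VAL) spins until the condition becomes true and then provides ACQUIRE ordering. On architectures without a specialized implementation, the generic fallback behaves roughly like this sketch (simplified from asm-generic/barrier.h, shown only to illustrate why the wait is harmless without other CPUs):

	/* Roughly: smp_cond_load_acquire(&p->on_cpu, !VAL) */
	while (READ_ONCE(p->on_cpu))
		cpu_relax();		/* wait for __schedule() to finish elsewhere */
	smp_acquire__after_ctrl_dep();	/* promote the control dependency to ACQUIRE */

On a single-CPU system p->on_cpu should never be observed as 1 here, so the loop falls straight through and only the (cheap) ordering remains.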
Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 850f39fe5b8f..9eefc74f35a8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4308,14 +4308,12 @@ static bool __task_needs_rq_lock(struct task_struct= *p) if (p->on_rq) return true; =20 -#ifdef CONFIG_SMP /* * Ensure the task has finished __schedule() and will not be referenced * anymore. Again, see try_to_wake_up() for a longer comment. */ smp_rmb(); smp_cond_load_acquire(&p->on_cpu, !VAL); -#endif =20 return false; } --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 01A2A26AA8F for ; Wed, 28 May 2025 08:11:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419877; cv=none; b=UNqRtMLXMzqvbmLWFxSw1EAdZYwDvhaxoNNMjktTyDOB3DTNUi7kWEpRwHgNfLwJEsGJG5+oHIVqCzTq+4btXz628CXsStFZi3QYBcP4bIMqXwaXfBZ0647W1vgc1y5524B3Dy5DRB8tBSpfptn05zwk1Y3v0+sLipa1lWgYscQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419877; c=relaxed/simple; bh=wDpwR49UWi0ljBeIQkbNv8UOQoWdLigZTUZ/r3UlnbM=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=pSezs7JLa4alDa+RFMJ+1w5eZsze0wZapB7IpjRyLK64utjQsmNcm4RXHY5E5nA0FLhc+5CLLxrRN7GvCNpxfROPCxfnl/ornP1ZJZIJfg9eKXcwtnnAGsfa+HU/3Gf2GQWfa+5ahm0wL56KO97lGzNVgtWvRwkcI5OxyjlVP04= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=nVc9nCU7; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="nVc9nCU7" Received: by smtp.kernel.org (Postfix) with ESMTPSA id C5E7EC4CEEF; Wed, 28 May 2025 08:11:13 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419876; bh=wDpwR49UWi0ljBeIQkbNv8UOQoWdLigZTUZ/r3UlnbM=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=nVc9nCU7eeqtbqtQT9/xbuawzxsjUGBG5/jVN7aWc09YEHEctc8/VbqXzGa+UvSuJ WsER1psHWDChU5kmvQ3p8JYa684Xtat8JyMhkiCllOUlJOytDFRcolu+tiCdROK0T5 xsQQtR/d/gSzzxb5w0FPvqa7hJQmA4WnNfIRy/+oinT0bLTDa5qCLMmBdMMy6H3sf9 GeJ3DRlSIeDv/hjfE7W3yS0tw8xaRcIdJhQO+BtE4t6SHOg76B5nqJdEMlqEuIUfEO 8RzmFTTLz6dIFeyik4i90z+fMBw6UvhEWr5Xj5TCB5rWJw7pV4c7zXE0B/vKw3hcvd s2+yemP6R+Cvw== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 25/43] sched/smp: Use the SMP version of wake_up_new_task() Date: Wed, 28 May 2025 10:09:06 +0200 Message-ID: <20250528080924.2273858-26-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 
1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9eefc74f35a8..5a77afcbb60b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4820,7 +4820,6 @@ void wake_up_new_task(struct task_struct *p) activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL); trace_sched_wakeup_new(p); wakeup_preempt(rq, p, wake_flags); -#ifdef CONFIG_SMP if (p->sched_class->task_woken) { /* * Nothing relies on rq->lock after this, so it's fine to @@ -4830,7 +4829,6 @@ void wake_up_new_task(struct task_struct *p) p->sched_class->task_woken(rq, p); rq_repin_lock(rq, &rf); } -#endif task_rq_unlock(rq, p, &rf); } =20 --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E548F26D4E2 for ; Wed, 28 May 2025 08:11:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419881; cv=none; b=BvBrmdQQak7piE9qgQk1dferGBaGDMJnnctKGgXfn/HX4t3mPfcaWsz1fUGVE4T9+2DLV+wrNm0y4A3DvibJnBCo+NsmPjArkNz2+JVLrlWeBl739YIXLe12JAJWVqlzuMhcroImswlLhudr/8h+bHtHATNjrtHfErR0ixhOPE0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419881; c=relaxed/simple; bh=CwbGVD7ViB6aFSnaChoiYxmAyKxsY+s2o/PK3zPWAOU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=GgLdR7DLIks65mi/B/9jF3Me02jQAY2YL5F9E5pBCJW2BnEUnNhqCto1eQH2Htmc6K3hsP067BFytb82wZLAX/NjrouzL+JXsiZdaWkArlKIig8qJ4xsda8sTB1MHXBafAQbwEi+LqYSSAO9NbqJVONAJZVbOmNeilQ+69YbyX4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=pTCK4oP5; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="pTCK4oP5" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 54AF9C4CEEB; Wed, 28 May 2025 08:11:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419880; bh=CwbGVD7ViB6aFSnaChoiYxmAyKxsY+s2o/PK3zPWAOU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=pTCK4oP5Ysa1Bb0D4duEeVwq5ZVsT19IYi7Czlmo5CaXlD3Hy7kTFCTytVwJ1S1c4 tYkpGkUMywYgmto17lSokBolYRcevWkuO86RxRQdH//h8Zd41EYvUlCrvjPbjC7eqi Xgi+1HU/cqxE79b2jh1r1K813Xv+YL8LfPeiys/3YQBFx1Q0/ZKrmXLwDPAifBdm/h bfGAi6x1RliilT/hih7JiW0M5lw+uRbdIXsVjaE91LQlYQoP/1lgrCPzq2ZSkTPQvX MXHPVh+mowcmiTlc5+KXcmg5817eS7MdpeyqVfpw+b6wNJFEUorCx8+HNmSrHaoMyW jMPgnOkiclQlA== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 26/43] sched/smp: Use the SMP version of sched_exec() Date: Wed, 28 May 2025 10:09:07 +0200 Message-ID: 
<20250528080924.2273858-27-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- include/linux/sched/task.h | 4 ---- kernel/sched/core.c | 4 ---- 2 files changed, 8 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ca1db4b92c32..c517dbc242f7 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -109,11 +109,7 @@ int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); =20 /* sched_exec is called by processes performing an exec */ -#ifdef CONFIG_SMP extern void sched_exec(void); -#else -#define sched_exec() {} -#endif =20 static inline struct task_struct *get_task_struct(struct task_struct *t) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5a77afcbb60b..ac933e6fc465 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5415,8 +5415,6 @@ unsigned int nr_iowait(void) return sum; } =20 -#ifdef CONFIG_SMP - /* * sched_exec - execve() is a valuable balancing opportunity, because at * this point the task has the smallest effective memory and cache footpri= nt. @@ -5440,8 +5438,6 @@ void sched_exec(void) stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); } =20 -#endif /* CONFIG_SMP */ - DEFINE_PER_CPU(struct kernel_stat, kstat); DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); =20 --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 70C082472BE for ; Wed, 28 May 2025 08:11:24 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419884; cv=none; b=KpmrhY+o4oAtMDwcPzacqm5JH8e7TAebPEs4OKLEWZuEbPju4kDkvuRllviIKdivcQZz3Fen9wIzryUnf5xPQ/pWdp1U+IyK6nh4VX/PTqHdSOUTZWYRh8h7Y6SRbv43K3f5+UA1er4colXeAtmvwicH87TZ5b3hRXrLPxm6g6E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419884; c=relaxed/simple; bh=MU+eHvbyChE4Bc8+8rbhPS5TvW+nIIPhEbR9gz4Z4iA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=oFmZUykJExcnOS2lagUou6X7Fk7zfCr+AwfJ/MvX4Fcj/aJ82f7MiYjLWlbFmoDcx21twTPllRtsCDJ4OxE3Rv38zyiZgdgJF6449SnncxeytIcKbvo5pRI8NbwkOp50Ef7wY+t4x2CUcwBSr5bs85D/kidAC8VeU+a4EYmZsvU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=AqXd5tKb; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="AqXd5tKb" Received: by smtp.kernel.org (Postfix) with ESMTPSA id D72F6C4CEEF; Wed, 28 May 2025 08:11:20 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; 
s=k20201202; t=1748419883; bh=MU+eHvbyChE4Bc8+8rbhPS5TvW+nIIPhEbR9gz4Z4iA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=AqXd5tKbMsv+WZzZmf+4QhrXyE1gpgwiVMNjSB7B7r1QdcVWmu9bwlDF0C/kFQ/FM +JVxVdxW32uST10rSIJYccp93pjEJ2KpWhhVHkhAIwnO1VqHZ0YqKzZE/wlZlrK0Xu LaSR2nElUP1YwUPGdRoZUYJptMpzg4Tc6KSyMx1xAHEo8EI8GOPEqiZeE4dI+xH3M2 QrJqzd8Z3FsZCtXaUmjaNZYDI/xlEy0zsicf5LYVHDC9h0vwGfZe4ktHkczxnpLPQS WJB3luy8cEdqxb3ngfJArM3xYI4z/EmwkDZhcCnIt2tp8GlIZq+GeT7lGHs7Tn9H7K 92mlgidEPBOyg== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 27/43] sched/smp: Use the SMP version of idle_thread_set_boot_cpu() Date: Wed, 28 May 2025 10:09:08 +0200 Message-ID: <20250528080924.2273858-28-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional. Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/core.c | 2 -- kernel/smpboot.c | 4 ---- 2 files changed, 6 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ac933e6fc465..d2d3e5e337c7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8577,9 +8577,7 @@ void __init sched_init(void) =20 calc_load_update =3D jiffies + LOAD_FREQ; =20 -#ifdef CONFIG_SMP idle_thread_set_boot_cpu(); -#endif =20 balance_push_set(smp_processor_id(), false); init_sched_fair_class(); diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 1992b62e980b..4503b60ce9bd 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -18,8 +18,6 @@ =20 #include "smpboot.h" =20 -#ifdef CONFIG_SMP - #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD /* * For the hotplug case we keep the task structs around and reuse @@ -76,8 +74,6 @@ void __init idle_threads_init(void) } #endif =20 -#endif /* #ifdef CONFIG_SMP */ - static LIST_HEAD(hotplug_threads); static DEFINE_MUTEX(smpboot_threads_lock); =20 --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E40372472BE for ; Wed, 28 May 2025 08:11:27 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419888; cv=none; b=QV0Mz0+9iAj34fEm3Ov3Sb/Zq/+4Y7GPG52GuJt7SV1ORrQHOTNTigMdNjMSm1yt4cU9+BFNfLCb3Nb6BQo1iHNW2q2pLoeaT/fdB9vUqSDPAFZwRuT8uqLH/5vDw3UvTY7ja2FRsJaa63o1d8hRE0M17yYIBlMG1/IJprTcgyM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419888; c=relaxed/simple; bh=9qIjADipKCxWWsixiqxlsxQqCV/80rzVkbIAgAeM5xg=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; 
b=Kqqt5sq3ALAhvUNLQDSw7uBplmqAD1oVd/mmu9m3UemcY/eHbpyOBpkeCf6nafGjddwFifidrGoWe40HS/Pi+wpfPlmUuIbzGeanWqAhIhRiAg3KYvpLKKA/Ys6W+CNBG9FRMIxlT2wQYzcBa3yPS8ndhhog1H8pQX3dQ/Y9cW8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KbLUUJTU; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KbLUUJTU" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 63BA5C4CEE7; Wed, 28 May 2025 08:11:24 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419887; bh=9qIjADipKCxWWsixiqxlsxQqCV/80rzVkbIAgAeM5xg=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=KbLUUJTUK1OKA6LQHimIbEkVHXJAgcqmhIeGZNDFs0MeKKmoZHNom9I7pq8fYUnyh +6egr2ApfxfV3G4myPFVCN5sIAjgIGfsr+8rbqa24Uerx0bvVPLNEr5cSLNjxKvkfm xTifqNvZRUuBI49IpI7ncx1yZQPzuqIAeJVHWjDmDgiFwLoU3DdqysIO351w+GcqXO y9c98spVS4JI4NmW3PXEBIGj7/PJJ0GLH1IRNshdl5o0dmeExi8sUfwq6Sn+wx0M9j EDdhvrBcKU+xl3ZuqbnHu1+bsDgGY+VhlGyzJObVrT8azdlcWayibh3JgtyP9Vmfso EnegwLXM6feCQ== From: Ingo Molnar To: linux-kernel@vger.kernel.org Cc: Dietmar Eggemann , Linus Torvalds , Peter Zijlstra , Shrikanth Hegde , Thomas Gleixner , Valentin Schneider , Steven Rostedt , Mel Gorman , Vincent Guittot , Sebastian Andrzej Siewior , Ingo Molnar , Juri Lelli Subject: [PATCH 28/43] sched/smp: Use the SMP version of the RT scheduling class Date: Wed, 28 May 2025 10:09:09 +0200 Message-ID: <20250528080924.2273858-29-mingo@kernel.org> X-Mailer: git-send-email 2.45.2 In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org> References: <20250528080924.2273858-1-mingo@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Simplify the scheduler by making formerly SMP-only primitives and data structures unconditional in the RT policies scheduler. 
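For context, the push/pull machinery made unconditional here keeps candidate tasks on a priority-sorted plist. The enqueue side follows roughly this pattern (a condensed sketch of the existing kernel/sched/rt.c code, matching the hunks below):

	static void enqueue_pushable_task_sketch(struct rq *rq, struct task_struct *p)
	{
		/* Re-key the node by the task's current priority: */
		plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
		plist_node_init(&p->pushable_tasks, p->prio);
		plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

		/* Track the best priority among pushable tasks: */
		if (p->prio < rq->rt.highest_prio.next)
			rq->rt.highest_prio.next = p->prio;
	}

On an UP kernel nothing ever qualifies as pushable (nr_cpus_allowed is always 1), so in practice the list stays empty and compiling it in unconditionally costs little.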
Signed-off-by: Ingo Molnar Cc: Dietmar Eggemann Cc: Juri Lelli Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Shrikanth Hegde Cc: Steven Rostedt Cc: Valentin Schneider Cc: Vincent Guittot --- kernel/sched/rt.c | 72 ------------------------------------------------= ---- kernel/sched/sched.h | 2 -- 2 files changed, 74 deletions(-) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 0cd801458eef..7119f4c8aa68 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -75,12 +75,10 @@ void init_rt_rq(struct rt_rq *rt_rq) /* delimiter for bitsearch: */ __set_bit(MAX_RT_PRIO, array->bitmap); =20 -#if defined CONFIG_SMP rt_rq->highest_prio.curr =3D MAX_RT_PRIO-1; rt_rq->highest_prio.next =3D MAX_RT_PRIO-1; rt_rq->overloaded =3D 0; plist_head_init(&rt_rq->pushable_tasks); -#endif /* CONFIG_SMP */ /* We start is dequeued state, because no RT tasks are queued */ rt_rq->rt_queued =3D 0; =20 @@ -329,8 +327,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct = task_group *parent) } #endif /* !CONFIG_RT_GROUP_SCHED */ =20 -#ifdef CONFIG_SMP - static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *pr= ev) { /* Try to pull RT tasks here if we lower this rq's prio */ @@ -430,21 +426,6 @@ static void dequeue_pushable_task(struct rq *rq, struc= t task_struct *p) } } =20 -#else /* !CONFIG_SMP: */ - -static inline void enqueue_pushable_task(struct rq *rq, struct task_struct= *p) -{ -} - -static inline void dequeue_pushable_task(struct rq *rq, struct task_struct= *p) -{ -} - -static inline void rt_queue_push_tasks(struct rq *rq) -{ -} -#endif /* !CONFIG_SMP */ - static void enqueue_top_rt_rq(struct rt_rq *rt_rq); static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count); =20 @@ -594,17 +575,10 @@ static int rt_se_boosted(struct sched_rt_entity *rt_s= e) return p->prio !=3D p->normal_prio; } =20 -#ifdef CONFIG_SMP static inline const struct cpumask *sched_rt_period_mask(void) { return this_rq()->rd->span; } -#else -static inline const struct cpumask *sched_rt_period_mask(void) -{ - return cpu_online_mask; -} -#endif =20 static inline struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) @@ -625,7 +599,6 @@ bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) rt_rq->rt_time < rt_b->rt_runtime); } =20 -#ifdef CONFIG_SMP /* * We ran out of runtime, see if we can borrow some from our neighbours. 
*/ @@ -798,9 +771,6 @@ static void balance_runtime(struct rt_rq *rt_rq) raw_spin_lock(&rt_rq->rt_runtime_lock); } } -#else /* !CONFIG_SMP: */ -static inline void balance_runtime(struct rt_rq *rt_rq) {} -#endif /* !CONFIG_SMP */ =20 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) { @@ -977,10 +947,8 @@ struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidt= h *rt_b, int cpu) return &cpu_rq(cpu)->rt; } =20 -#ifdef CONFIG_SMP static void __enable_runtime(struct rq *rq) { } static void __disable_runtime(struct rq *rq) { } -#endif =20 #endif /* !CONFIG_RT_GROUP_SCHED */ =20 @@ -1075,8 +1043,6 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq) cpufreq_update_util(rq, 0); } =20 -#if defined CONFIG_SMP - static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { @@ -1107,16 +1073,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int p= rev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } =20 -#else /* !CONFIG_SMP: */ - -static inline -void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} -static inline -void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} - -#endif /* !CONFIG_SMP */ - -#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED static void inc_rt_prio(struct rt_rq *rt_rq, int prio) { @@ -1155,13 +1111,6 @@ dec_rt_prio(struct rt_rq *rt_rq, int prio) dec_rt_prio_smp(rt_rq, prio, prev_prio); } =20 -#else /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED): */ - -static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} -static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} - -#endif /* !(CONFIG_SMP || CONFIG_RT_GROUP_SCHED) */ - #ifdef CONFIG_RT_GROUP_SCHED =20 static void @@ -1538,7 +1487,6 @@ static void yield_task_rt(struct rq *rq) requeue_task_rt(rq, rq->curr, 0); } =20 -#ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct *task); =20 static int @@ -1653,7 +1601,6 @@ static int balance_rt(struct rq *rq, struct task_stru= ct *p, struct rq_flags *rf) =20 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runna= ble(rq); } -#endif /* CONFIG_SMP */ =20 /* * Preempt the current task with a newly woken task if needed: @@ -1667,7 +1614,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct t= ask_struct *p, int flags) return; } =20 -#ifdef CONFIG_SMP /* * If: * @@ -1682,7 +1628,6 @@ static void wakeup_preempt_rt(struct rq *rq, struct t= ask_struct *p, int flags) */ if (p->prio =3D=3D donor->prio && !test_tsk_need_resched(rq->curr)) check_preempt_equal_prio(rq, p); -#endif /* CONFIG_SMP */ } =20 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, = bool first) @@ -1776,8 +1721,6 @@ static void put_prev_task_rt(struct rq *rq, struct ta= sk_struct *p, struct task_s enqueue_pushable_task(rq, p); } =20 -#ifdef CONFIG_SMP - /* Only try algorithms three times */ #define RT_MAX_TRIES 3 =20 @@ -2451,11 +2394,6 @@ void __init init_sched_rt_class(void) GFP_KERNEL, cpu_to_node(i)); } } -#else /* !CONFIG_SMP: */ -void __init init_sched_rt_class(void) -{ -} -#endif /* !CONFIG_SMP */ =20 /* * When switching a task to RT, we may overload the runqueue @@ -2479,10 +2417,8 @@ static void switched_to_rt(struct rq *rq, struct tas= k_struct *p) * then see if we can move to another run queue. 
*/ if (task_on_rq_queued(p)) { -#ifdef CONFIG_SMP if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) rt_queue_push_tasks(rq); -#endif /* CONFIG_SMP */ if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq))) resched_curr(rq); } @@ -2499,7 +2435,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p,= int oldprio) return; =20 if (task_current_donor(rq, p)) { -#ifdef CONFIG_SMP /* * If our priority decreases while running, we * may need to pull tasks to this runqueue. @@ -2513,11 +2448,6 @@ prio_changed_rt(struct rq *rq, struct task_struct *p= , int oldprio) */ if (p->prio > rq->rt.highest_prio.curr) resched_curr(rq); -#else /* !CONFIG_SMP: */ - /* For UP simply resched on drop of prio */ - if (oldprio < p->prio) - resched_curr(rq); -#endif /* !CONFIG_SMP */ } else { /* * This task is not running, but if it is @@ -2638,7 +2568,6 @@ DEFINE_SCHED_CLASS(rt) =3D { .put_prev_task =3D put_prev_task_rt, .set_next_task =3D set_next_task_rt, =20 -#ifdef CONFIG_SMP .balance =3D balance_rt, .select_task_rq =3D select_task_rq_rt, .set_cpus_allowed =3D set_cpus_allowed_common, @@ -2647,7 +2576,6 @@ DEFINE_SCHED_CLASS(rt) =3D { .task_woken =3D task_woken_rt, .switched_from =3D switched_from_rt, .find_lock_rq =3D find_lock_lowest_rq, -#endif /* !CONFIG_SMP */ =20 .task_tick =3D task_tick_rt, =20 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index de130ffa42b2..7c67e1ccc0ae 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -791,11 +791,9 @@ struct rt_rq { int curr; /* highest queued rt task prio */ int next; /* next highest */ } highest_prio; -#ifdef CONFIG_SMP bool overloaded; struct plist_head pushable_tasks; =20 -#endif /* CONFIG_SMP */ int rt_queued; =20 #ifdef CONFIG_RT_GROUP_SCHED --=20 2.45.2 From nobody Sat Feb 7 12:32:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 5E06526FA53 for ; Wed, 28 May 2025 08:11:31 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419891; cv=none; b=DVkJRMMLjMZOlYhuMLPJLsYlfcjYwUNkPyimzxBcMRM85sOBTBfNqqMuliCb76cHJ7qiqiuEVFLyNzp1toyNAc/Lh+f0dJjKntohMlAESf2gzsamnZlaFOReUaPJMlCFVqQkQJzQyppWMP6gWzbCJ2HBhCGGKgcLKOltzjD9fS0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1748419891; c=relaxed/simple; bh=phChXyA4Hfddq3zV4Apy/8kpLlEUlQ4HQMTQhYZya1k=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Y+lHayNeXby0Xs3bpzu60iLo/xnd+3e/t0TVBrY8ubxGQvmLAMI2Q5C4NgYsX7UyluHJiLj90hxdwuJudEp5V4f3XUfiX3hGSNpAyWsmzwKKZjCVgGglZjE88q0JS0tLlgHpXikLH3AoVYpGOPK2qSgs9ZDn2Ti7CEilQ9uKRJ4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=kqdDz1v9; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="kqdDz1v9" Received: by smtp.kernel.org (Postfix) with ESMTPSA id DF529C4CEEF; Wed, 28 May 2025 08:11:27 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1748419890; bh=phChXyA4Hfddq3zV4Apy/8kpLlEUlQ4HQMTQhYZya1k=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=kqdDz1v9R9JjWG3vnw4LSoJ6MR3dTElF9hThkHn3dLSJECidLQT2EiTgYNS7rTx2f 
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 29/43] sched/smp: Use the SMP version of the deadline scheduling class
Date: Wed, 28 May 2025 10:09:10 +0200
Message-ID: <20250528080924.2273858-30-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/deadline.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 80dd95419eac..c36813af9bb3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -3001,7 +3001,6 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 	if (!task_on_rq_queued(p))
 		return;
 
-#ifdef CONFIG_SMP
 	/*
 	 * This might be too much, but unfortunately
 	 * we don't have the old deadline value, and
@@ -3030,13 +3029,6 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 		    dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
 			resched_curr(rq);
 	}
-#else /* !CONFIG_SMP: */
-	/*
-	 * We don't know if p has a earlier or later deadline, so let's blindly
-	 * set a (maybe not needed) rescheduling point.
-	 */
-	resched_curr(rq);
-#endif /* !CONFIG_SMP */
 }
 
 #ifdef CONFIG_SCHED_CORE
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 30/43] sched/smp: Use the SMP version of scheduler debugging data
Date: Wed, 28 May 2025 10:09:11 +0200
Message-ID: <20250528080924.2273858-31-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/debug.c | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 027750931420..aea71013c371 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -166,8 +166,6 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
-#ifdef CONFIG_SMP
-
 static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
 				   size_t cnt, loff_t *ppos)
 {
@@ -214,8 +212,6 @@ static const struct file_operations sched_scaling_fops = {
 	.release	= single_release,
 };
 
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
 static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
@@ -508,7 +504,6 @@ static __init int sched_init_debug(void)
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
 
-#ifdef CONFIG_SMP
 	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
 	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
 	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -516,7 +511,6 @@ static __init int sched_init_debug(void)
 	sched_domains_mutex_lock();
 	update_sched_domain_debugfs();
 	sched_domains_mutex_unlock();
-#endif /* CONFIG_SMP */
 
 #ifdef CONFIG_NUMA_BALANCING
 	numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -682,11 +676,9 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 	}
 
 	P(se->load.weight);
-#ifdef CONFIG_SMP
 	P(se->avg.load_avg);
 	P(se->avg.util_avg);
 	P(se->avg.runnable_avg);
-#endif /* CONFIG_SMP */
 
 #undef PN_SCHEDSTAT
 #undef PN
@@ -846,7 +838,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
-#ifdef CONFIG_SMP
 	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
 			cfs_rq->avg.load_avg);
 	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
@@ -867,7 +858,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
 #endif /* CONFIG_FAIR_GROUP_SCHED */
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_CFS_BANDWIDTH
 	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
 			cfs_rq->throttled);
@@ -964,12 +954,10 @@ do {									\
 #undef P
 #undef PN
 
-#ifdef CONFIG_SMP
 #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 	P64(avg_idle);
 	P64(max_idle_balance_cost);
 #undef P64
-#endif /* CONFIG_SMP */
 
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
 	if (schedstat_enabled()) {
@@ -1235,7 +1223,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	__PS("nr_involuntary_switches", p->nivcsw);
 
 	P(se.load.weight);
-#ifdef CONFIG_SMP
 	P(se.avg.load_sum);
 	P(se.avg.runnable_sum);
 	P(se.avg.util_sum);
@@ -1244,7 +1231,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_avg);
 	P(se.avg.last_update_time);
 	PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif /* CONFIG_SMP */
 #ifdef CONFIG_UCLAMP_TASK
 	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
 	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 31/43] sched/smp: Use the SMP version of schedstats
Date: Wed, 28 May 2025 10:09:12 +0200
Message-ID: <20250528080924.2273858-32-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/stats.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index def01886e93f..adb40371e370 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -114,10 +114,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		seq_printf(seq, "timestamp %lu\n", jiffies);
 	} else {
 		struct rq *rq;
-#ifdef CONFIG_SMP
 		struct sched_domain *sd;
 		int dcount = 0;
-#endif
 		cpu = (unsigned long)(v - 2);
 		rq = cpu_rq(cpu);
 
@@ -132,7 +130,6 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 		seq_printf(seq, "\n");
 
-#ifdef CONFIG_SMP
 		/* domain-specific stats */
 		rcu_read_lock();
 		for_each_domain(cpu, sd) {
@@ -163,7 +160,6 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->ttwu_move_balance);
 		}
 		rcu_read_unlock();
-#endif /* CONFIG_SMP */
 	}
 	return 0;
 }
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 32/43] sched/smp: Use the SMP version of the scheduler syscalls
Date: Wed, 28 May 2025 10:09:13 +0200
Message-ID: <20250528080924.2273858-33-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/syscalls.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index d7fccf871c7d..77ae87f36e84 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -209,10 +209,8 @@ int idle_cpu(int cpu)
 	if (rq->nr_running)
 		return 0;
 
-#ifdef CONFIG_SMP
 	if (rq->ttwu_pending)
 		return 0;
-#endif
 
 	return 1;
 }
@@ -641,7 +639,6 @@ int __sched_setscheduler(struct task_struct *p,
 			goto unlock;
 		}
 #endif /* CONFIG_RT_GROUP_SCHED */
-#ifdef CONFIG_SMP
 		if (dl_bandwidth_enabled() && dl_policy(policy) &&
 		    !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
 			cpumask_t *span = rq->rd->span;
@@ -657,7 +654,6 @@ int __sched_setscheduler(struct task_struct *p,
 				goto unlock;
 			}
 		}
-#endif /* CONFIG_SMP */
 	}
 
 	/* Re-check policy now with rq lock held: */
@@ -1239,7 +1235,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
 	if (user_mask) {
 		cpumask_copy(user_mask, in_mask);
-	} else if (IS_ENABLED(CONFIG_SMP)) {
+	} else {
 		return -ENOMEM;
 	}
 
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 33/43] sched/smp: Use the SMP version of sched_update_asym_prefer_cpu()
Date: Wed, 28 May 2025 10:09:14 +0200
Message-ID: <20250528080924.2273858-34-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/topology.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 72a9a8e17986..18eb309fd8d0 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1336,7 +1336,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 /* Update the "asym_prefer_cpu" when arch_asym_cpu_priority() changes.
  */
 void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
 {
-#ifdef CONFIG_SMP
 	int asym_prefer_cpu = cpu;
 	struct sched_domain *sd;
 
@@ -1386,7 +1385,6 @@ void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio)
 
 		WRITE_ONCE(sg->asym_prefer_cpu, asym_prefer_cpu);
 	}
-#endif /* CONFIG_SMP */
 }
 
 /*
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 34/43] sched/smp: Use the SMP version of the idle scheduling class
Date: Wed, 28 May 2025 10:09:15 +0200
Message-ID: <20250528080924.2273858-35-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/idle.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index cb01b063d733..2571ce6908a1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -427,7 +427,6 @@ void cpu_startup_entry(enum cpuhp_state state)
  * idle-task scheduling class.
  */
 
-#ifdef CONFIG_SMP
 static int
 select_task_rq_idle(struct task_struct *p, int cpu, int flags)
 {
@@ -439,7 +438,6 @@ balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	return WARN_ON_ONCE(1);
 }
-#endif /* CONFIG_SMP */
 
 /*
  * Idle tasks are unconditionally rescheduled:
@@ -526,11 +524,9 @@ DEFINE_SCHED_CLASS(idle) = {
 	.put_prev_task		= put_prev_task_idle,
 	.set_next_task		= set_next_task_idle,
 
-#ifdef CONFIG_SMP
 	.balance		= balance_idle,
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
-#endif
 
 	.task_tick		= task_tick_idle,
 
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 35/43] sched/smp: Use the SMP version of the stop-CPU scheduling class
Date: Wed, 28 May 2025 10:09:16 +0200
Message-ID: <20250528080924.2273858-36-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/stop_task.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 058dd42e3d9b..7314c8be6e30 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -8,7 +8,6 @@
  * See kernel/stop_machine.c
  */
 
-#ifdef CONFIG_SMP
 static int
 select_task_rq_stop(struct task_struct *p, int cpu, int flags)
 {
@@ -20,7 +19,6 @@ balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	return sched_stop_runnable(rq);
 }
-#endif /* CONFIG_SMP */
 
 static void
 wakeup_preempt_stop(struct rq *rq, struct task_struct *p, int flags)
@@ -106,11 +104,9 @@ DEFINE_SCHED_CLASS(stop) = {
 	.put_prev_task		= put_prev_task_stop,
 	.set_next_task		= set_next_task_stop,
 
-#ifdef CONFIG_SMP
 	.balance		= balance_stop,
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
-#endif
 
 	.task_tick		= task_tick_stop,
 
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 36/43] sched/smp: Use the SMP version of cpu_of()
Date: Wed, 28 May 2025 10:09:17 +0200
Message-ID: <20250528080924.2273858-37-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7c67e1ccc0ae..19808e09bd94 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1293,11 +1293,7 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 
 static inline int cpu_of(struct rq *rq)
 {
-#ifdef CONFIG_SMP
 	return rq->cpu;
-#else
-	return 0;
-#endif
 }
 
 #define MDF_PUSH	0x01
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 37/43] sched/smp: Use the SMP version of is_migration_disabled()
Date: Wed, 28 May 2025 10:09:18 +0200
Message-ID: <20250528080924.2273858-38-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 19808e09bd94..61a5f683c502 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1300,11 +1300,7 @@ static inline int cpu_of(struct rq *rq)
 
 static inline bool is_migration_disabled(struct task_struct *p)
 {
-#ifdef CONFIG_SMP
 	return p->migration_disabled;
-#else
-	return false;
-#endif
 }
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 38/43] sched/smp: Use the SMP version of rq_pin_lock()
Date: Wed, 28 May 2025 10:09:19 +0200
Message-ID: <20250528080924.2273858-39-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 61a5f683c502..28e2847a3584 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1744,9 +1744,7 @@ static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
 
 	rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
 	rf->clock_update_flags = 0;
-#ifdef CONFIG_SMP
 	WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
-#endif
 }
 
 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 39/43] sched/smp: Use the SMP version of task_on_cpu()
Date: Wed, 28 May 2025 10:09:20 +0200
Message-ID: <20250528080924.2273858-40-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 28e2847a3584..fbc9d25a3e6c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2237,11 +2237,7 @@ static inline int task_current_donor(struct rq *rq, struct task_struct *p)
 
 static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
 {
-#ifdef CONFIG_SMP
 	return p->on_cpu;
-#else
-	return task_current(rq, p);
-#endif
 }
 
 static inline int task_on_rq_queued(struct task_struct *p)
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 40/43] sched/smp: Use the SMP version of WF_ and SD_ flag sanity checks
Date: Wed, 28 May 2025 10:09:21 +0200
Message-ID: <20250528080924.2273858-41-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fbc9d25a3e6c..e4aeb1e1c849 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2260,11 +2260,9 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_CURRENT_CPU	0x40 /* Prefer to move the wakee to the current CPU.
 			      */
 #define WF_RQ_SELECTED	0x80 /* ->select_task_rq() was called */
 
-#ifdef CONFIG_SMP
 static_assert(WF_EXEC == SD_BALANCE_EXEC);
 static_assert(WF_FORK == SD_BALANCE_FORK);
 static_assert(WF_TTWU == SD_BALANCE_WAKE);
-#endif
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 41/43] sched/smp: Use the SMP version of ENQUEUE_MIGRATED
Date: Wed, 28 May 2025 10:09:22 +0200
Message-ID: <20250528080924.2273858-42-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.
Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e4aeb1e1c849..20d9da03d947 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2318,11 +2318,7 @@ extern const u32 sched_prio_to_wmult[40];
 
 #define ENQUEUE_HEAD		0x10
 #define ENQUEUE_REPLENISH	0x20
-#ifdef CONFIG_SMP
 #define ENQUEUE_MIGRATED	0x40
-#else
-#define ENQUEUE_MIGRATED	0x00
-#endif
 #define ENQUEUE_INITIAL		0x80
 #define ENQUEUE_MIGRATING	0x100
 #define ENQUEUE_DELAYED		0x200
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 42/43] sched/smp: Use the SMP version of add_nr_running()
Date: Wed, 28 May 2025 10:09:23 +0200
Message-ID: <20250528080924.2273858-43-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>
Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 20d9da03d947..e6d637015d0c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2672,10 +2672,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 		call_trace_sched_update_nr_running(rq, count);
 	}
 
-#ifdef CONFIG_SMP
 	if (prev_nr < 2 && rq->nr_running >= 2)
 		set_rd_overloaded(rq->rd, 1);
-#endif
 
 	sched_update_tick_dependency(rq);
 }
-- 
2.45.2

From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde, Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman, Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 43/43] sched/smp: Use the SMP version of double_rq_clock_clear_update()
Date: Wed, 28 May 2025 10:09:24 +0200
Message-ID: <20250528080924.2273858-44-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>
From nobody Sat Feb 7 12:32:01 2026
From: Ingo Molnar
To: linux-kernel@vger.kernel.org
Cc: Dietmar Eggemann, Linus Torvalds, Peter Zijlstra, Shrikanth Hegde,
 Thomas Gleixner, Valentin Schneider, Steven Rostedt, Mel Gorman,
 Vincent Guittot, Sebastian Andrzej Siewior, Ingo Molnar, Juri Lelli
Subject: [PATCH 43/43] sched/smp: Use the SMP version of double_rq_clock_clear_update()
Date: Wed, 28 May 2025 10:09:24 +0200
Message-ID: <20250528080924.2273858-44-mingo@kernel.org>
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>
References: <20250528080924.2273858-1-mingo@kernel.org>

Simplify the scheduler by making formerly SMP-only primitives and
data structures unconditional.

Signed-off-by: Ingo Molnar
Cc: Dietmar Eggemann
Cc: Juri Lelli
Cc: Linus Torvalds
Cc: Mel Gorman
Cc: Peter Zijlstra
Cc: Shrikanth Hegde
Cc: Steven Rostedt
Cc: Valentin Schneider
Cc: Vincent Guittot
---
 kernel/sched/sched.h | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e6d637015d0c..dedf92fae94f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2839,10 +2839,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
 {
 	rq1->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
-	/* rq1 == rq2 for !CONFIG_SMP, so just clear RQCF_UPDATED once. */
-#ifdef CONFIG_SMP
 	rq2->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
-#endif
 }

 #define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...)		\
--
2.45.2
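Why the second clear is safe to do unconditionally: masking
clock_update_flags with (RQCF_REQ_SKIP|RQCF_ACT_SKIP) clears
RQCF_UPDATED while preserving the two skip bits, and doing that twice
on the same runqueue (the UP case, where rq1 == rq2) is idempotent.
A toy model in plain C, with flag values assumed to match the kernel's
but an invented struct type:

#include <stdio.h>

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

struct rq { unsigned int clock_update_flags; };

static void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
{
	/* Clears RQCF_UPDATED, keeps the skip bits; safe even if rq1 == rq2. */
	rq1->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP);
	rq2->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP);
}

int main(void)
{
	struct rq a = { RQCF_UPDATED | RQCF_ACT_SKIP };
	struct rq b = { RQCF_UPDATED };

	double_rq_clock_clear_update(&a, &b);
	printf("a=%#x b=%#x\n", a.clock_update_flags, b.clock_update_flags);
	/* prints a=0x2 b=0 */
	return 0;
}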
From nobody Sat Feb 7 12:32:01 2026
Date: Fri, 30 May 2025 17:34:33 +0800
From: Herbert Xu
To: Ingo Molnar
Cc: linux-kernel@vger.kernel.org, dietmar.eggemann@arm.com,
 torvalds@linux-foundation.org, peterz@infradead.org,
 sshegde@linux.ibm.com, tglx@linutronix.de, vschneid@redhat.com,
 rostedt@goodmis.org, mgorman@suse.de, vincent.guittot@linaro.org,
 bigeasy@linutronix.de, mingo@kernel.org
Subject: [PATCH] sched: Break dependency loop between sched.h and preempt.h
In-Reply-To: <20250528080924.2273858-1-mingo@kernel.org>

Ingo Molnar wrote:
>
> The latest version of this series can be found at:
>
>   git://git.kernel.org/pub/scm/linux/kernel/git/mingo/tip.git WIP.sched/core

I'm trying to break a dependency loop between linux/sched.h and
linux/preempt.h, and your tree seems to be a good base for my patch.

Reported-by: Huacai Chen

---8<---
There is a dependency loop between linux/preempt.h and linux/sched.h:

https://patchwork.kernel.org/project/linux-crypto/patch/20250530041658.909576-1-chenhuacai@loongson.cn/

In particular, sched.h relies on PREEMPT_LOCK_OFFSET from preempt.h,
introduced by commit fe32d3cd5e8e, while preempt.h relies on sched.h
for current->softirq_disable_cnt, introduced by commit 728b478d2d35.
sched.h actually includes preempt.h, while preempt.h does not include
sched.h, which causes build failures for users that include preempt.h
without sched.h.

Fix this by splitting struct task_struct out of sched.h and into
sched/types.h. Then preempt.h can include sched/types.h, and sched.h
can continue to include preempt.h without creating a loop.

Note that the struct seq_file forward declaration has been kept in
linux/sched.h despite there being no users in either sched.h or
sched/types.h, because multiple header files rely on that forward
declaration.

Reported-by: Huacai Chen
Signed-off-by: Herbert Xu
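The shape of the fix can be seen in miniature. A self-contained toy
analogue with invented names, not the real headers: the definition both
sides need moves into a leaf "types" header, the header that needs the
struct layout includes only that leaf, and the original top-level
include direction keeps working:

/* sched/types.h analogue: a leaf header that loops back to nothing. */
struct task {
	int softirq_disable_cnt;
};

/* preempt.h analogue: needs the task layout, so it now pulls in only
 * the leaf header above instead of the full sched.h. */
#define PREEMPT_LOCK_OFFSET	1

static inline int task_softirq_disable_count(const struct task *t)
{
	return t->softirq_disable_cnt;	/* needs the full struct definition */
}

/* sched.h analogue: needs PREEMPT_LOCK_OFFSET and may keep including
 * the preempt header, since that header no longer loops back here. */
#define SCHED_LOCK_OFFSET	(PREEMPT_LOCK_OFFSET + 1)

The actual patch follows:

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 1fad1c8a4c76..abb0d982396b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -7,8 +7,10 @@
  * preempt_count (used for kernel preemption, interrupt count, etc.)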
*/ =20 +#include #include #include +#include #include =20 /* diff --git a/include/linux/sched.h b/include/linux/sched.h index 8d3167059675..44bfd980f620 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -10,80 +10,21 @@ #include =20 #include -#include #include #include #include =20 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include =20 -/* task_struct member predeclarations (sorted alphabetically): */ -struct audit_context; -struct bio_list; -struct blk_plug; -struct bpf_local_storage; -struct bpf_run_ctx; -struct bpf_net_context; -struct capture_control; -struct cfs_rq; -struct fs_struct; -struct futex_pi_state; -struct io_context; -struct io_uring_task; -struct mempolicy; -struct nameidata; -struct nsproxy; -struct perf_event_context; -struct perf_ctx_data; +/* struct predeclarations (sorted alphabetically): */ struct pid_namespace; -struct pipe_inode_info; -struct rcu_node; -struct reclaim_state; -struct robust_list_head; struct root_domain; -struct rq; struct sched_attr; -struct sched_dl_entity; struct seq_file; -struct sighand_struct; -struct signal_struct; -struct task_delay_info; -struct task_group; -struct task_struct; -struct user_event_mm; - -#include =20 /* * Task state bitmask. NOTE! These bits are also @@ -310,14 +251,6 @@ struct user_event_mm; =20 #define get_current_state() READ_ONCE(current->__state) =20 -/* - * Define the task command name length as enum, then it can be visible to - * BPF programs. - */ -enum { - TASK_COMM_LEN =3D 16, -}; - extern void sched_tick(void); =20 #define MAX_SCHEDULE_TIMEOUT LONG_MAX @@ -343,46 +276,6 @@ extern void io_schedule(void); DECLARE_TRACEPOINT(sched_set_state_tp); extern void __trace_set_current_state(int state_value); =20 -/** - * struct prev_cputime - snapshot of system and user cputime - * @utime: time spent in user mode - * @stime: time spent in system mode - * @lock: protects the above two fields - * - * Stores previous user/system time values such that we can guarantee - * monotonicity. - */ -struct prev_cputime { -#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - u64 utime; - u64 stime; - raw_spinlock_t lock; -#endif -}; - -enum vtime_state { - /* Task is sleeping or running in a CPU with VTIME inactive: */ - VTIME_INACTIVE =3D 0, - /* Task is idle */ - VTIME_IDLE, - /* Task runs in kernelspace in a CPU with VTIME active: */ - VTIME_SYS, - /* Task runs in userspace in a CPU with VTIME active: */ - VTIME_USER, - /* Task runs as guests in a CPU with VTIME active: */ - VTIME_GUEST, -}; - -struct vtime { - seqcount_t seqcount; - unsigned long long starttime; - enum vtime_state state; - unsigned int cpu; - u64 utime; - u64 stime; - u64 gtime; -}; - /* * Utilization clamp constraints. * @UCLAMP_MIN: Minimum utilization @@ -404,380 +297,6 @@ struct sched_param { int sched_priority; }; =20 -struct sched_info { -#ifdef CONFIG_SCHED_INFO - /* Cumulative counters: */ - - /* # of times we have run on this CPU: */ - unsigned long pcount; - - /* Time spent waiting on a runqueue: */ - unsigned long long run_delay; - - /* Max time spent waiting on a runqueue: */ - unsigned long long max_run_delay; - - /* Min time spent waiting on a runqueue: */ - unsigned long long min_run_delay; - - /* Timestamps: */ - - /* When did we last run on a CPU? 
*/ - unsigned long long last_arrival; - - /* When were we last queued to run? */ - unsigned long long last_queued; - -#endif /* CONFIG_SCHED_INFO */ -}; - -/* - * Integer metrics need fixed point arithmetic, e.g., sched/fair - * has a few: load, load_avg, util_avg, freq, and capacity. - * - * We define a basic fixed point arithmetic range, and then formalize - * all these metrics based on that basic range. - */ -# define SCHED_FIXEDPOINT_SHIFT 10 -# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) - -/* Increase resolution of cpu_capacity calculations */ -# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT -# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) - -struct load_weight { - unsigned long weight; - u32 inv_weight; -}; - -/* - * The load/runnable/util_avg accumulates an infinite geometric series - * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). - * - * [load_avg definition] - * - * load_avg =3D runnable% * scale_load_down(load) - * - * [runnable_avg definition] - * - * runnable_avg =3D runnable% * SCHED_CAPACITY_SCALE - * - * [util_avg definition] - * - * util_avg =3D running% * SCHED_CAPACITY_SCALE - * - * where runnable% is the time ratio that a sched_entity is runnable and - * running% the time ratio that a sched_entity is running. - * - * For cfs_rq, they are the aggregated values of all runnable and blocked - * sched_entities. - * - * The load/runnable/util_avg doesn't directly factor frequency scaling an= d CPU - * capacity scaling. The scaling is done through the rq_clock_pelt that is= used - * for computing those signals (see update_rq_clock_pelt()) - * - * N.B., the above ratios (runnable% and running%) themselves are in the - * range of [0, 1]. To do fixed point arithmetics, we therefore scale them - * to as large a range as necessary. This is for example reflected by - * util_avg's SCHED_CAPACITY_SCALE. - * - * [Overflow issue] - * - * The 64-bit load_sum can have 4353082796 (=3D2^64/47742/88761) entities - * with the highest load (=3D88761), always runnable on a single cfs_rq, - * and should not overflow as the number already hits PID_MAX_LIMIT. - * - * For all other cases (including 32-bit kernels), struct load_weight's - * weight will overflow first before we do, because: - * - * Max(load_avg) <=3D Max(load.weight) - * - * Then it is the load_weight's responsibility to consider overflow - * issues. - */ -struct sched_avg { - u64 last_update_time; - u64 load_sum; - u64 runnable_sum; - u32 util_sum; - u32 period_contrib; - unsigned long load_avg; - unsigned long runnable_avg; - unsigned long util_avg; - unsigned int util_est; -} ____cacheline_aligned; - -/* - * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_a= vg - * updates. When a task is dequeued, its util_est should not be updated if= its - * util_avg has not been updated in the meantime. - * This information is mapped into the MSB bit of util_est at dequeue time. - * Since max value of util_est for a task is 1024 (PELT util_avg for a tas= k) - * it is safe to use MSB. 
- */ -#define UTIL_EST_WEIGHT_SHIFT 2 -#define UTIL_AVG_UNCHANGED 0x80000000 - -struct sched_statistics { -#ifdef CONFIG_SCHEDSTATS - u64 wait_start; - u64 wait_max; - u64 wait_count; - u64 wait_sum; - u64 iowait_count; - u64 iowait_sum; - - u64 sleep_start; - u64 sleep_max; - s64 sum_sleep_runtime; - - u64 block_start; - u64 block_max; - s64 sum_block_runtime; - - s64 exec_max; - u64 slice_max; - - u64 nr_migrations_cold; - u64 nr_failed_migrations_affine; - u64 nr_failed_migrations_running; - u64 nr_failed_migrations_hot; - u64 nr_forced_migrations; - - u64 nr_wakeups; - u64 nr_wakeups_sync; - u64 nr_wakeups_migrate; - u64 nr_wakeups_local; - u64 nr_wakeups_remote; - u64 nr_wakeups_affine; - u64 nr_wakeups_affine_attempts; - u64 nr_wakeups_passive; - u64 nr_wakeups_idle; - -#ifdef CONFIG_SCHED_CORE - u64 core_forceidle_sum; -#endif -#endif /* CONFIG_SCHEDSTATS */ -} ____cacheline_aligned; - -struct sched_entity { - /* For load-balancing: */ - struct load_weight load; - struct rb_node run_node; - u64 deadline; - u64 min_vruntime; - u64 min_slice; - - struct list_head group_node; - unsigned char on_rq; - unsigned char sched_delayed; - unsigned char rel_deadline; - unsigned char custom_slice; - /* hole */ - - u64 exec_start; - u64 sum_exec_runtime; - u64 prev_sum_exec_runtime; - u64 vruntime; - s64 vlag; - u64 slice; - - u64 nr_migrations; - -#ifdef CONFIG_FAIR_GROUP_SCHED - int depth; - struct sched_entity *parent; - /* rq on which this entity is (to be) queued: */ - struct cfs_rq *cfs_rq; - /* rq "owned" by this entity/group: */ - struct cfs_rq *my_q; - /* cached value of my_q->h_nr_running */ - unsigned long runnable_weight; -#endif - - /* - * Per entity load average tracking. - * - * Put into separate cache line so it does not - * collide with read-mostly values above. - */ - struct sched_avg avg; -}; - -struct sched_rt_entity { - struct list_head run_list; - unsigned long timeout; - unsigned long watchdog_stamp; - unsigned int time_slice; - unsigned short on_rq; - unsigned short on_list; - - struct sched_rt_entity *back; -#ifdef CONFIG_RT_GROUP_SCHED - struct sched_rt_entity *parent; - /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; - /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; -#endif -} __randomize_layout; - -typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); -typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *); - -struct sched_dl_entity { - struct rb_node rb_node; - - /* - * Original scheduling parameters. Copied here from sched_attr - * during sched_setattr(), they will remain the same until - * the next sched_setattr(). - */ - u64 dl_runtime; /* Maximum runtime for each instance */ - u64 dl_deadline; /* Relative deadline of each instance */ - u64 dl_period; /* Separation of two instances (period) */ - u64 dl_bw; /* dl_runtime / dl_period */ - u64 dl_density; /* dl_runtime / dl_deadline */ - - /* - * Actual scheduling parameters. Initialized with the values above, - * they are continuously updated during task execution. Note that - * the remaining runtime could be < 0 in case we are in overrun. - */ - s64 runtime; /* Remaining runtime for this instance */ - u64 deadline; /* Absolute deadline for this instance */ - unsigned int flags; /* Specifying the scheduler behaviour */ - - /* - * Some bool flags: - * - * @dl_throttled tells if we exhausted the runtime. If so, the - * task has to wait for a replenishment to be performed at the - * next firing of dl_timer. 
- * - * @dl_yielded tells if task gave up the CPU before consuming - * all its available runtime during the last job. - * - * @dl_non_contending tells if the task is inactive while still - * contributing to the active utilization. In other words, it - * indicates if the inactive timer has been armed and its handler - * has not been executed yet. This flag is useful to avoid race - * conditions between the inactive timer handler and the wakeup - * code. - * - * @dl_overrun tells if the task asked to be informed about runtime - * overruns. - * - * @dl_server tells if this is a server entity. - * - * @dl_defer tells if this is a deferred or regular server. For - * now only defer server exists. - * - * @dl_defer_armed tells if the deferrable server is waiting - * for the replenishment timer to activate it. - * - * @dl_server_active tells if the dlserver is active(started). - * dlserver is started on first cfs enqueue on an idle runqueue - * and is stopped when a dequeue results in 0 cfs tasks on the - * runqueue. In other words, dlserver is active only when cpu's - * runqueue has atleast one cfs task. - * - * @dl_defer_running tells if the deferrable server is actually - * running, skipping the defer phase. - */ - unsigned int dl_throttled : 1; - unsigned int dl_yielded : 1; - unsigned int dl_non_contending : 1; - unsigned int dl_overrun : 1; - unsigned int dl_server : 1; - unsigned int dl_server_active : 1; - unsigned int dl_defer : 1; - unsigned int dl_defer_armed : 1; - unsigned int dl_defer_running : 1; - - /* - * Bandwidth enforcement timer. Each -deadline task has its - * own bandwidth to be enforced, thus we need one timer per task. - */ - struct hrtimer dl_timer; - - /* - * Inactive timer, responsible for decreasing the active utilization - * at the "0-lag time". When a -deadline task blocks, it contributes - * to GRUB's active utilization until the "0-lag time", hence a - * timer is needed to decrease the active utilization at the correct - * time. - */ - struct hrtimer inactive_timer; - - /* - * Bits for DL-server functionality. Also see the comment near - * dl_server_update(). - * - * @rq the runqueue this server is for - * - * @server_has_tasks() returns true if @server_pick return a - * runnable task. - */ - struct rq *rq; - dl_server_has_tasks_f server_has_tasks; - dl_server_pick_f server_pick_task; - -#ifdef CONFIG_RT_MUTEXES - /* - * Priority Inheritance. When a DEADLINE scheduling entity is boosted - * pi_se points to the donor, otherwise points to the dl_se it belongs - * to (the original one/itself). - */ - struct sched_dl_entity *pi_se; -#endif -}; - -#ifdef CONFIG_UCLAMP_TASK -/* Number of utilization clamp buckets (shorter alias) */ -#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT - -/* - * Utilization clamp for a scheduling entity - * @value: clamp value "assigned" to a se - * @bucket_id: bucket index corresponding to the "assigned" value - * @active: the se is currently refcounted in a rq's bucket - * @user_defined: the requested clamp value comes from user-space - * - * The bucket_id is the index of the clamp bucket matching the clamp value - * which is pre-computed and stored to avoid expensive integer divisions f= rom - * the fast path. - * - * The active bit is set whenever a task has got an "effective" value assi= gned, - * which can be different from the clamp value "requested" from user-space. - * This allows to know a task is refcounted in the rq's bucket correspondi= ng - * to the "effective" bucket_id. 
- * - * The user_defined bit is set whenever a task has got a task-specific cla= mp - * value requested from userspace, i.e. the system defaults apply to this = task - * just as a restriction. This allows to relax default clamps when a less - * restrictive task-specific value has been requested, thus allowing to - * implement a "nice" semantic. For example, a task running with a 20% - * default boost can still drop its own boosting to 0%. - */ -struct uclamp_se { - unsigned int value : bits_per(SCHED_CAPACITY_SCALE); - unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); - unsigned int active : 1; - unsigned int user_defined : 1; -}; -#endif /* CONFIG_UCLAMP_TASK */ - -union rcu_special { - struct { - u8 blocked; - u8 need_qs; - u8 exp_hint; /* Hint for performance. */ - u8 need_mb; /* Readers need smp_mb(). */ - } b; /* Bits. */ - u32 s; /* Set of bits. */ -}; - enum perf_event_task_context { perf_invalid_context =3D -1, perf_hw_context =3D 0, @@ -785,863 +304,6 @@ enum perf_event_task_context { perf_nr_task_contexts, }; =20 -/* - * Number of contexts where an event can trigger: - * task, softirq, hardirq, nmi. - */ -#define PERF_NR_CONTEXTS 4 - -struct wake_q_node { - struct wake_q_node *next; -}; - -struct kmap_ctrl { -#ifdef CONFIG_KMAP_LOCAL - int idx; - pte_t pteval[KM_MAX_IDX]; -#endif -}; - -struct task_struct { -#ifdef CONFIG_THREAD_INFO_IN_TASK - /* - * For reasons of header soup (see current_thread_info()), this - * must be the first element of task_struct. - */ - struct thread_info thread_info; -#endif - unsigned int __state; - - /* saved state for "spinlock sleepers" */ - unsigned int saved_state; - - /* - * This begins the randomizable portion of task_struct. Only - * scheduling-critical items should be added above here. - */ - randomized_struct_fields_start - - void *stack; - refcount_t usage; - /* Per task flags (PF_*), defined further below: */ - unsigned int flags; - unsigned int ptrace; - -#ifdef CONFIG_MEM_ALLOC_PROFILING - struct alloc_tag *alloc_tag; -#endif - - int on_cpu; - struct __call_single_node wake_entry; - unsigned int wakee_flips; - unsigned long wakee_flip_decay_ts; - struct task_struct *last_wakee; - - /* - * recent_used_cpu is initially set as the last CPU used by a task - * that wakes affine another task. Waker/wakee relationships can - * push tasks around a CPU where each wakeup moves to the next one. - * Tracking a recently used CPU allows a quick search for a recently - * used CPU that may be idle. - */ - int recent_used_cpu; - int wake_cpu; - int on_rq; - - int prio; - int static_prio; - int normal_prio; - unsigned int rt_priority; - - struct sched_entity se; - struct sched_rt_entity rt; - struct sched_dl_entity dl; - struct sched_dl_entity *dl_server; -#ifdef CONFIG_SCHED_CLASS_EXT - struct sched_ext_entity scx; -#endif - const struct sched_class *sched_class; - -#ifdef CONFIG_SCHED_CORE - struct rb_node core_node; - unsigned long core_cookie; - unsigned int core_occupation; -#endif - -#ifdef CONFIG_CGROUP_SCHED - struct task_group *sched_task_group; -#endif - - -#ifdef CONFIG_UCLAMP_TASK - /* - * Clamp values requested for a scheduling entity. - * Must be updated with task_rq_lock() held. - */ - struct uclamp_se uclamp_req[UCLAMP_CNT]; - /* - * Effective clamp values used for a scheduling entity. - * Must be updated with task_rq_lock() held. 
- */ - struct uclamp_se uclamp[UCLAMP_CNT]; -#endif - - struct sched_statistics stats; - -#ifdef CONFIG_PREEMPT_NOTIFIERS - /* List of struct preempt_notifier: */ - struct hlist_head preempt_notifiers; -#endif - -#ifdef CONFIG_BLK_DEV_IO_TRACE - unsigned int btrace_seq; -#endif - - unsigned int policy; - unsigned long max_allowed_capacity; - int nr_cpus_allowed; - const cpumask_t *cpus_ptr; - cpumask_t *user_cpus_ptr; - cpumask_t cpus_mask; - void *migration_pending; - unsigned short migration_disabled; - unsigned short migration_flags; - -#ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - union rcu_special rcu_read_unlock_special; - struct list_head rcu_node_entry; - struct rcu_node *rcu_blocked_node; -#endif /* #ifdef CONFIG_PREEMPT_RCU */ - -#ifdef CONFIG_TASKS_RCU - unsigned long rcu_tasks_nvcsw; - u8 rcu_tasks_holdout; - u8 rcu_tasks_idx; - int rcu_tasks_idle_cpu; - struct list_head rcu_tasks_holdout_list; - int rcu_tasks_exit_cpu; - struct list_head rcu_tasks_exit_list; -#endif /* #ifdef CONFIG_TASKS_RCU */ - -#ifdef CONFIG_TASKS_TRACE_RCU - int trc_reader_nesting; - int trc_ipi_to_cpu; - union rcu_special trc_reader_special; - struct list_head trc_holdout_list; - struct list_head trc_blkd_node; - int trc_blkd_cpu; -#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ - - struct sched_info sched_info; - - struct list_head tasks; - struct plist_node pushable_tasks; - struct rb_node pushable_dl_tasks; - - struct mm_struct *mm; - struct mm_struct *active_mm; - struct address_space *faults_disabled_mapping; - - int exit_state; - int exit_code; - int exit_signal; - /* The signal sent when the parent dies: */ - int pdeath_signal; - /* JOBCTL_*, siglock protected: */ - unsigned long jobctl; - - /* Used for emulating ABI behavior of previous Linux versions: */ - unsigned int personality; - - /* Scheduler bits, serialized by scheduler locks: */ - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; - unsigned sched_task_hot:1; - - /* Force alignment to the next boundary: */ - unsigned :0; - - /* Unserialized, strictly 'current' */ - - /* - * This field must not be in the scheduler word above due to wakelist - * queueing no longer being serialized by p->on_cpu. However: - * - * p->XXX =3D X; ttwu() - * schedule() if (p->on_rq && ..) // false - * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //tr= ue - * deactivate_task() ttwu_queue_wakelist()) - * p->on_rq =3D 0; p->sched_remote_wakeup =3D Y; - * - * guarantees all stores of 'current' are visible before - * ->sched_remote_wakeup gets used, so it can be in this word. 
- */ - unsigned sched_remote_wakeup:1; -#ifdef CONFIG_RT_MUTEXES - unsigned sched_rt_mutex:1; -#endif - - /* Bit to tell TOMOYO we're in execve(): */ - unsigned in_execve:1; - unsigned in_iowait:1; -#ifndef TIF_RESTORE_SIGMASK - unsigned restore_sigmask:1; -#endif -#ifdef CONFIG_MEMCG_V1 - unsigned in_user_fault:1; -#endif -#ifdef CONFIG_LRU_GEN - /* whether the LRU algorithm may apply to this access */ - unsigned in_lru_fault:1; -#endif -#ifdef CONFIG_COMPAT_BRK - unsigned brk_randomized:1; -#endif -#ifdef CONFIG_CGROUPS - /* disallow userland-initiated cgroup migration */ - unsigned no_cgroup_migration:1; - /* task is frozen/stopped (used by the cgroup freezer) */ - unsigned frozen:1; -#endif -#ifdef CONFIG_BLK_CGROUP - unsigned use_memdelay:1; -#endif -#ifdef CONFIG_PSI - /* Stalled due to lack of memory */ - unsigned in_memstall:1; -#endif -#ifdef CONFIG_PAGE_OWNER - /* Used by page_owner=3Don to detect recursion in page tracking. */ - unsigned in_page_owner:1; -#endif -#ifdef CONFIG_EVENTFD - /* Recursion prevention for eventfd_signal() */ - unsigned in_eventfd:1; -#endif -#ifdef CONFIG_ARCH_HAS_CPU_PASID - unsigned pasid_activated:1; -#endif -#ifdef CONFIG_X86_BUS_LOCK_DETECT - unsigned reported_split_lock:1; -#endif -#ifdef CONFIG_TASK_DELAY_ACCT - /* delay due to memory thrashing */ - unsigned in_thrashing:1; -#endif -#ifdef CONFIG_PREEMPT_RT - struct netdev_xmit net_xmit; -#endif - unsigned long atomic_flags; /* Flags requiring atomic access. */ - - struct restart_block restart_block; - - pid_t pid; - pid_t tgid; - -#ifdef CONFIG_STACKPROTECTOR - /* Canary value for the -fstack-protector GCC feature: */ - unsigned long stack_canary; -#endif - /* - * Pointers to the (original) parent process, youngest child, younger sib= ling, - * older sibling, respectively. (p->father can be replaced with - * p->real_parent->pid) - */ - - /* Real parent process: */ - struct task_struct __rcu *real_parent; - - /* Recipient of SIGCHLD, wait4() reports: */ - struct task_struct __rcu *parent; - - /* - * Children/sibling form the list of natural children: - */ - struct list_head children; - struct list_head sibling; - struct task_struct *group_leader; - - /* - * 'ptraced' is the list of tasks this task is using ptrace() on. - * - * This includes both natural children and PTRACE_ATTACH targets. - * 'ptrace_entry' is this task's link on the p->parent->ptraced list. - */ - struct list_head ptraced; - struct list_head ptrace_entry; - - /* PID/PID hash table linkage. 
*/ - struct pid *thread_pid; - struct hlist_node pid_links[PIDTYPE_MAX]; - struct list_head thread_node; - - struct completion *vfork_done; - - /* CLONE_CHILD_SETTID: */ - int __user *set_child_tid; - - /* CLONE_CHILD_CLEARTID: */ - int __user *clear_child_tid; - - /* PF_KTHREAD | PF_IO_WORKER */ - void *worker_private; - - u64 utime; - u64 stime; -#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME - u64 utimescaled; - u64 stimescaled; -#endif - u64 gtime; - struct prev_cputime prev_cputime; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - struct vtime vtime; -#endif - -#ifdef CONFIG_NO_HZ_FULL - atomic_t tick_dep_mask; -#endif - /* Context switch counts: */ - unsigned long nvcsw; - unsigned long nivcsw; - - /* Monotonic time in nsecs: */ - u64 start_time; - - /* Boot based time in nsecs: */ - u64 start_boottime; - - /* MM fault and swap info: this can arguably be seen as either mm-specifi= c or thread-specific: */ - unsigned long min_flt; - unsigned long maj_flt; - - /* Empty if CONFIG_POSIX_CPUTIMERS=3Dn */ - struct posix_cputimers posix_cputimers; - -#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK - struct posix_cputimers_work posix_cputimers_work; -#endif - - /* Process credentials: */ - - /* Tracer's credentials at attach: */ - const struct cred __rcu *ptracer_cred; - - /* Objective and real subjective task credentials (COW): */ - const struct cred __rcu *real_cred; - - /* Effective (overridable) subjective task credentials (COW): */ - const struct cred __rcu *cred; - -#ifdef CONFIG_KEYS - /* Cached requested key. */ - struct key *cached_requested_key; -#endif - - /* - * executable name, excluding path. - * - * - normally initialized begin_new_exec() - * - set it with set_task_comm() - * - strscpy_pad() to ensure it is always NUL-terminated and - * zero-padded - * - task_lock() to ensure the operation is atomic and the name is - * fully updated. 
- */ - char comm[TASK_COMM_LEN]; - - struct nameidata *nameidata; - -#ifdef CONFIG_SYSVIPC - struct sysv_sem sysvsem; - struct sysv_shm sysvshm; -#endif -#ifdef CONFIG_DETECT_HUNG_TASK - unsigned long last_switch_count; - unsigned long last_switch_time; -#endif - /* Filesystem information: */ - struct fs_struct *fs; - - /* Open file information: */ - struct files_struct *files; - -#ifdef CONFIG_IO_URING - struct io_uring_task *io_uring; -#endif - - /* Namespaces: */ - struct nsproxy *nsproxy; - - /* Signal handlers: */ - struct signal_struct *signal; - struct sighand_struct __rcu *sighand; - sigset_t blocked; - sigset_t real_blocked; - /* Restored if set_restore_sigmask() was used: */ - sigset_t saved_sigmask; - struct sigpending pending; - unsigned long sas_ss_sp; - size_t sas_ss_size; - unsigned int sas_ss_flags; - - struct callback_head *task_works; - -#ifdef CONFIG_AUDIT -#ifdef CONFIG_AUDITSYSCALL - struct audit_context *audit_context; -#endif - kuid_t loginuid; - unsigned int sessionid; -#endif - struct seccomp seccomp; - struct syscall_user_dispatch syscall_dispatch; - - /* Thread group tracking: */ - u64 parent_exec_id; - u64 self_exec_id; - - /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems= _allowed, mempolicy: */ - spinlock_t alloc_lock; - - /* Protection of the PI data structures: */ - raw_spinlock_t pi_lock; - - struct wake_q_node wake_q; - -#ifdef CONFIG_RT_MUTEXES - /* PI waiters blocked on a rt_mutex held by this task: */ - struct rb_root_cached pi_waiters; - /* Updated under owner's pi_lock and rq lock */ - struct task_struct *pi_top_task; - /* Deadlock detection and priority inheritance handling: */ - struct rt_mutex_waiter *pi_blocked_on; -#endif - -#ifdef CONFIG_DEBUG_MUTEXES - /* Mutex deadlock detection: */ - struct mutex_waiter *blocked_on; -#endif - -#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER - struct mutex *blocker_mutex; -#endif - -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - int non_block_count; -#endif - -#ifdef CONFIG_TRACE_IRQFLAGS - struct irqtrace_events irqtrace; - unsigned int hardirq_threaded; - u64 hardirq_chain_key; - int softirqs_enabled; - int softirq_context; - int irq_config; -#endif -#ifdef CONFIG_PREEMPT_RT - int softirq_disable_cnt; -#endif - -#ifdef CONFIG_LOCKDEP -# define MAX_LOCK_DEPTH 48UL - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; -#endif - -#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) - unsigned int in_ubsan; -#endif - - /* Journalling filesystem info: */ - void *journal_info; - - /* Stacked block device info: */ - struct bio_list *bio_list; - - /* Stack plugging: */ - struct blk_plug *plug; - - /* VM state: */ - struct reclaim_state *reclaim_state; - - struct io_context *io_context; - -#ifdef CONFIG_COMPACTION - struct capture_control *capture_control; -#endif - /* Ptrace state: */ - unsigned long ptrace_message; - kernel_siginfo_t *last_siginfo; - - struct task_io_accounting ioac; -#ifdef CONFIG_PSI - /* Pressure stall state */ - unsigned int psi_flags; -#endif -#ifdef CONFIG_TASK_XACCT - /* Accumulated RSS usage: */ - u64 acct_rss_mem1; - /* Accumulated virtual memory usage: */ - u64 acct_vm_mem1; - /* stime + utime since last update: */ - u64 acct_timexpd; -#endif -#ifdef CONFIG_CPUSETS - /* Protected by ->alloc_lock: */ - nodemask_t mems_allowed; - /* Sequence number to catch updates: */ - seqcount_spinlock_t mems_allowed_seq; - int cpuset_mem_spread_rotor; -#endif -#ifdef CONFIG_CGROUPS - /* Control Group info protected by 
css_set_lock: */ - struct css_set __rcu *cgroups; - /* cg_list protected by css_set_lock and tsk->alloc_lock: */ - struct list_head cg_list; -#endif -#ifdef CONFIG_X86_CPU_RESCTRL - u32 closid; - u32 rmid; -#endif -#ifdef CONFIG_FUTEX - struct robust_list_head __user *robust_list; -#ifdef CONFIG_COMPAT - struct compat_robust_list_head __user *compat_robust_list; -#endif - struct list_head pi_state_list; - struct futex_pi_state *pi_state_cache; - struct mutex futex_exit_mutex; - unsigned int futex_state; -#endif -#ifdef CONFIG_PERF_EVENTS - u8 perf_recursion[PERF_NR_CONTEXTS]; - struct perf_event_context *perf_event_ctxp; - struct mutex perf_event_mutex; - struct list_head perf_event_list; - struct perf_ctx_data __rcu *perf_ctx_data; -#endif -#ifdef CONFIG_DEBUG_PREEMPT - unsigned long preempt_disable_ip; -#endif -#ifdef CONFIG_NUMA - /* Protected by alloc_lock: */ - struct mempolicy *mempolicy; - short il_prev; - u8 il_weight; - short pref_node_fork; -#endif -#ifdef CONFIG_NUMA_BALANCING - int numa_scan_seq; - unsigned int numa_scan_period; - unsigned int numa_scan_period_max; - int numa_preferred_nid; - unsigned long numa_migrate_retry; - /* Migration stamp: */ - u64 node_stamp; - u64 last_task_numa_placement; - u64 last_sum_exec_runtime; - struct callback_head numa_work; - - /* - * This pointer is only modified for current in syscall and - * pagefault context (and for tasks being destroyed), so it can be read - * from any of the following contexts: - * - RCU read-side critical section - * - current->numa_group from everywhere - * - task's runqueue locked, task not running - */ - struct numa_group __rcu *numa_group; - - /* - * numa_faults is an array split into four regions: - * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer - * in this precise order. - * - * faults_memory: Exponential decaying average of faults on a per-node - * basis. Scheduling placement decisions are made based on these - * counts. The values remain static for the duration of a PTE scan. - * faults_cpu: Track the nodes the process was running on when a NUMA - * hinting fault was incurred. - * faults_memory_buffer and faults_cpu_buffer: Record faults per node - * during the current scan window. When the scan completes, the counts - * in faults_memory and faults_cpu decay and these values are copied. - */ - unsigned long *numa_faults; - unsigned long total_numa_faults; - - /* - * numa_faults_locality tracks if faults recorded during the last - * scan window were remote/local or failed to migrate. The task scan - * period is adapted based on the locality of the faults with different - * weights depending on whether they were shared or private faults - */ - unsigned long numa_faults_locality[3]; - - unsigned long numa_pages_migrated; -#endif /* CONFIG_NUMA_BALANCING */ - -#ifdef CONFIG_RSEQ - struct rseq __user *rseq; - u32 rseq_len; - u32 rseq_sig; - /* - * RmW on rseq_event_mask must be performed atomically - * with respect to preemption. - */ - unsigned long rseq_event_mask; -# ifdef CONFIG_DEBUG_RSEQ - /* - * This is a place holder to save a copy of the rseq fields for - * validation of read-only fields. The struct rseq has a - * variable-length array at the end, so it cannot be used - * directly. Reserve a size large enough for the known fields. 
- */ - char rseq_fields[sizeof(struct rseq)]; -# endif -#endif - -#ifdef CONFIG_SCHED_MM_CID - int mm_cid; /* Current cid in mm */ - int last_mm_cid; /* Most recent cid in mm */ - int migrate_from_cpu; - int mm_cid_active; /* Whether cid bitmap is active */ - struct callback_head cid_work; -#endif - - struct tlbflush_unmap_batch tlb_ubc; - - /* Cache last used pipe for splice(): */ - struct pipe_inode_info *splice_pipe; - - struct page_frag task_frag; - -#ifdef CONFIG_TASK_DELAY_ACCT - struct task_delay_info *delays; -#endif - -#ifdef CONFIG_FAULT_INJECTION - int make_it_fail; - unsigned int fail_nth; -#endif - /* - * When (nr_dirtied >=3D nr_dirtied_pause), it's time to call - * balance_dirty_pages() for a dirty throttling pause: - */ - int nr_dirtied; - int nr_dirtied_pause; - /* Start of a write-and-pause period: */ - unsigned long dirty_paused_when; - -#ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; -#endif - /* - * Time slack values; these are used to round up poll() and - * select() etc timeout values. These are in nanoseconds. - */ - u64 timer_slack_ns; - u64 default_timer_slack_ns; - -#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) - unsigned int kasan_depth; -#endif - -#ifdef CONFIG_KCSAN - struct kcsan_ctx kcsan_ctx; -#ifdef CONFIG_TRACE_IRQFLAGS - struct irqtrace_events kcsan_save_irqtrace; -#endif -#ifdef CONFIG_KCSAN_WEAK_MEMORY - int kcsan_stack_depth; -#endif -#endif - -#ifdef CONFIG_KMSAN - struct kmsan_ctx kmsan_ctx; -#endif - -#if IS_ENABLED(CONFIG_KUNIT) - struct kunit *kunit_test; -#endif - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - /* Index of current stored address in ret_stack: */ - int curr_ret_stack; - int curr_ret_depth; - - /* Stack of return addresses for return function tracing: */ - unsigned long *ret_stack; - - /* Timestamp for last schedule: */ - unsigned long long ftrace_timestamp; - unsigned long long ftrace_sleeptime; - - /* - * Number of functions that haven't been traced - * because of depth overrun: - */ - atomic_t trace_overrun; - - /* Pause tracing: */ - atomic_t tracing_graph_pause; -#endif - -#ifdef CONFIG_TRACING - /* Bitmask and counter of trace recursion: */ - unsigned long trace_recursion; -#endif /* CONFIG_TRACING */ - -#ifdef CONFIG_KCOV - /* See kernel/kcov.c for more details. 
*/ - - /* Coverage collection mode enabled for this task (0 if disabled): */ - unsigned int kcov_mode; - - /* Size of the kcov_area: */ - unsigned int kcov_size; - - /* Buffer for coverage collection: */ - void *kcov_area; - - /* KCOV descriptor wired with this task or NULL: */ - struct kcov *kcov; - - /* KCOV common handle for remote coverage collection: */ - u64 kcov_handle; - - /* KCOV sequence number: */ - int kcov_sequence; - - /* Collect coverage from softirq context: */ - unsigned int kcov_softirq; -#endif - -#ifdef CONFIG_MEMCG_V1 - struct mem_cgroup *memcg_in_oom; -#endif - -#ifdef CONFIG_MEMCG - /* Number of pages to reclaim on returning to userland: */ - unsigned int memcg_nr_pages_over_high; - - /* Used by memcontrol for targeted memcg charge: */ - struct mem_cgroup *active_memcg; - - /* Cache for current->cgroups->memcg->objcg lookups: */ - struct obj_cgroup *objcg; -#endif - -#ifdef CONFIG_BLK_CGROUP - struct gendisk *throttle_disk; -#endif - -#ifdef CONFIG_UPROBES - struct uprobe_task *utask; -#endif -#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) - unsigned int sequential_io; - unsigned int sequential_io_avg; -#endif - struct kmap_ctrl kmap_ctrl; -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; -# ifdef CONFIG_PREEMPT_RT - unsigned long saved_state_change; -# endif -#endif - struct rcu_head rcu; - refcount_t rcu_users; - int pagefault_disabled; -#ifdef CONFIG_MMU - struct task_struct *oom_reaper_list; - struct timer_list oom_reaper_timer; -#endif -#ifdef CONFIG_VMAP_STACK - struct vm_struct *stack_vm_area; -#endif -#ifdef CONFIG_THREAD_INFO_IN_TASK - /* A live task holds one reference: */ - refcount_t stack_refcount; -#endif -#ifdef CONFIG_LIVEPATCH - int patch_state; -#endif -#ifdef CONFIG_SECURITY - /* Used by LSM modules for access restriction: */ - void *security; -#endif -#ifdef CONFIG_BPF_SYSCALL - /* Used by BPF task local storage */ - struct bpf_local_storage __rcu *bpf_storage; - /* Used for BPF run context */ - struct bpf_run_ctx *bpf_ctx; -#endif - /* Used by BPF for per-TASK xdp storage */ - struct bpf_net_context *bpf_net_context; - -#ifdef CONFIG_GCC_PLUGIN_STACKLEAK - unsigned long lowest_stack; - unsigned long prev_lowest_stack; -#endif - -#ifdef CONFIG_X86_MCE - void __user *mce_vaddr; - __u64 mce_kflags; - u64 mce_addr; - __u64 mce_ripv : 1, - mce_whole_page : 1, - __mce_reserved : 62; - struct callback_head mce_kill_me; - int mce_count; -#endif - -#ifdef CONFIG_KRETPROBES - struct llist_head kretprobe_instances; -#endif -#ifdef CONFIG_RETHOOK - struct llist_head rethooks; -#endif - -#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH - /* - * If L1D flush is supported on mm context switch - * then we use this callback head to queue kill work - * to kill tasks that are not running on SMT disabled - * cores - */ - struct callback_head l1d_flush_kill; -#endif - -#ifdef CONFIG_RV - /* - * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. - * If we find justification for more monitors, we can think - * about adding more or developing a dynamic method. So far, - * none of these are justified. - */ - union rv_task_monitor rv[RV_PER_TASK_MONITORS]; -#endif - -#ifdef CONFIG_USER_EVENTS - struct user_event_mm *user_event_mm; -#endif - - /* CPU-specific state of this task: */ - struct thread_struct thread; - - /* - * New fields for task_struct should be added above here, so that - * they are included in the randomized portion of task_struct. 
- */ - randomized_struct_fields_end -} __attribute__ ((aligned (64))); - #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) =20 diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h index 969aaf5ef9d6..e99751be1c15 100644 --- a/include/linux/sched/types.h +++ b/include/linux/sched/types.h @@ -2,7 +2,72 @@ #ifndef _LINUX_SCHED_TYPES_H #define _LINUX_SCHED_TYPES_H =20 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +/* task_struct member predeclarations (sorted alphabetically): */ +struct audit_context; +struct bio_list; +struct blk_plug; +struct bpf_local_storage; +struct bpf_net_context; +struct bpf_run_ctx; +struct capture_control; +struct cfs_rq; +struct fs_struct; +struct futex_pi_state; +struct io_context; +struct io_uring_task; +struct mempolicy; +struct nameidata; +struct nsproxy; +struct perf_ctx_data; +struct perf_event_context; +struct pipe_inode_info; +struct reclaim_state; +struct robust_list_head; +struct rq; +struct sched_dl_entity; +struct sighand_struct; +struct signal_struct; +struct task_delay_info; +struct task_struct; +struct user_event_mm; =20 /** * struct task_cputime - collected CPU time counts @@ -20,4 +85,1283 @@ struct task_cputime { unsigned long long sum_exec_runtime; }; =20 +/* + * Define the task command name length as enum, then it can be visible to + * BPF programs. + */ +enum { + TASK_COMM_LEN =3D 16, +}; + +/** + * struct prev_cputime - snapshot of system and user cputime + * @utime: time spent in user mode + * @stime: time spent in system mode + * @lock: protects the above two fields + * + * Stores previous user/system time values such that we can guarantee + * monotonicity. + */ +struct prev_cputime { +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + u64 utime; + u64 stime; + raw_spinlock_t lock; +#endif +}; + +enum vtime_state { + /* Task is sleeping or running in a CPU with VTIME inactive: */ + VTIME_INACTIVE =3D 0, + /* Task is idle */ + VTIME_IDLE, + /* Task runs in kernelspace in a CPU with VTIME active: */ + VTIME_SYS, + /* Task runs in userspace in a CPU with VTIME active: */ + VTIME_USER, + /* Task runs as guests in a CPU with VTIME active: */ + VTIME_GUEST, +}; + +struct vtime { + seqcount_t seqcount; + unsigned long long starttime; + enum vtime_state state; + unsigned int cpu; + u64 utime; + u64 stime; + u64 gtime; +}; + +struct sched_info { +#ifdef CONFIG_SCHED_INFO + /* Cumulative counters: */ + + /* # of times we have run on this CPU: */ + unsigned long pcount; + + /* Time spent waiting on a runqueue: */ + unsigned long long run_delay; + + /* Max time spent waiting on a runqueue: */ + unsigned long long max_run_delay; + + /* Min time spent waiting on a runqueue: */ + unsigned long long min_run_delay; + + /* Timestamps: */ + + /* When did we last run on a CPU? */ + unsigned long long last_arrival; + + /* When were we last queued to run? */ + unsigned long long last_queued; + +#endif /* CONFIG_SCHED_INFO */ +}; + +/* + * Integer metrics need fixed point arithmetic, e.g., sched/fair + * has a few: load, load_avg, util_avg, freq, and capacity. + * + * We define a basic fixed point arithmetic range, and then formalize + * all these metrics based on that basic range. 
+ */ +# define SCHED_FIXEDPOINT_SHIFT 10 +# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT) + +/* Increase resolution of cpu_capacity calculations */ +# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT +# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) + +struct load_weight { + unsigned long weight; + u32 inv_weight; +}; + +/* + * The load/runnable/util_avg accumulates an infinite geometric series + * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c). + * + * [load_avg definition] + * + * load_avg =3D runnable% * scale_load_down(load) + * + * [runnable_avg definition] + * + * runnable_avg =3D runnable% * SCHED_CAPACITY_SCALE + * + * [util_avg definition] + * + * util_avg =3D running% * SCHED_CAPACITY_SCALE + * + * where runnable% is the time ratio that a sched_entity is runnable and + * running% the time ratio that a sched_entity is running. + * + * For cfs_rq, they are the aggregated values of all runnable and blocked + * sched_entities. + * + * The load/runnable/util_avg doesn't directly factor frequency scaling an= d CPU + * capacity scaling. The scaling is done through the rq_clock_pelt that is= used + * for computing those signals (see update_rq_clock_pelt()) + * + * N.B., the above ratios (runnable% and running%) themselves are in the + * range of [0, 1]. To do fixed point arithmetics, we therefore scale them + * to as large a range as necessary. This is for example reflected by + * util_avg's SCHED_CAPACITY_SCALE. + * + * [Overflow issue] + * + * The 64-bit load_sum can have 4353082796 (=3D2^64/47742/88761) entities + * with the highest load (=3D88761), always runnable on a single cfs_rq, + * and should not overflow as the number already hits PID_MAX_LIMIT. + * + * For all other cases (including 32-bit kernels), struct load_weight's + * weight will overflow first before we do, because: + * + * Max(load_avg) <=3D Max(load.weight) + * + * Then it is the load_weight's responsibility to consider overflow + * issues. + */ +struct sched_avg { + u64 last_update_time; + u64 load_sum; + u64 runnable_sum; + u32 util_sum; + u32 period_contrib; + unsigned long load_avg; + unsigned long runnable_avg; + unsigned long util_avg; + unsigned int util_est; +} ____cacheline_aligned; + +/* + * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_a= vg + * updates. When a task is dequeued, its util_est should not be updated if= its + * util_avg has not been updated in the meantime. + * This information is mapped into the MSB bit of util_est at dequeue time. + * Since max value of util_est for a task is 1024 (PELT util_avg for a tas= k) + * it is safe to use MSB. 
+ */ +#define UTIL_EST_WEIGHT_SHIFT 2 +#define UTIL_AVG_UNCHANGED 0x80000000 + +struct sched_statistics { +#ifdef CONFIG_SCHEDSTATS + u64 wait_start; + u64 wait_max; + u64 wait_count; + u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; + + u64 sleep_start; + u64 sleep_max; + s64 sum_sleep_runtime; + + u64 block_start; + u64 block_max; + s64 sum_block_runtime; + + s64 exec_max; + u64 slice_max; + + u64 nr_migrations_cold; + u64 nr_failed_migrations_affine; + u64 nr_failed_migrations_running; + u64 nr_failed_migrations_hot; + u64 nr_forced_migrations; + + u64 nr_wakeups; + u64 nr_wakeups_sync; + u64 nr_wakeups_migrate; + u64 nr_wakeups_local; + u64 nr_wakeups_remote; + u64 nr_wakeups_affine; + u64 nr_wakeups_affine_attempts; + u64 nr_wakeups_passive; + u64 nr_wakeups_idle; + +#ifdef CONFIG_SCHED_CORE + u64 core_forceidle_sum; +#endif +#endif /* CONFIG_SCHEDSTATS */ +} ____cacheline_aligned; + +struct sched_entity { + /* For load-balancing: */ + struct load_weight load; + struct rb_node run_node; + u64 deadline; + u64 min_vruntime; + u64 min_slice; + + struct list_head group_node; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + /* hole */ + + u64 exec_start; + u64 sum_exec_runtime; + u64 prev_sum_exec_runtime; + u64 vruntime; + s64 vlag; + u64 slice; + + u64 nr_migrations; + +#ifdef CONFIG_FAIR_GROUP_SCHED + int depth; + struct sched_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct cfs_rq *cfs_rq; + /* rq "owned" by this entity/group: */ + struct cfs_rq *my_q; + /* cached value of my_q->h_nr_running */ + unsigned long runnable_weight; +#endif + + /* + * Per entity load average tracking. + * + * Put into separate cache line so it does not + * collide with read-mostly values above. + */ + struct sched_avg avg; +}; + +struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; + unsigned long watchdog_stamp; + unsigned int time_slice; + unsigned short on_rq; + unsigned short on_list; + + struct sched_rt_entity *back; +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity *parent; + /* rq on which this entity is (to be) queued: */ + struct rt_rq *rt_rq; + /* rq "owned" by this entity/group: */ + struct rt_rq *my_q; +#endif +} __randomize_layout; + +typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *); +typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *); + +struct sched_dl_entity { + struct rb_node rb_node; + + /* + * Original scheduling parameters. Copied here from sched_attr + * during sched_setattr(), they will remain the same until + * the next sched_setattr(). + */ + u64 dl_runtime; /* Maximum runtime for each instance */ + u64 dl_deadline; /* Relative deadline of each instance */ + u64 dl_period; /* Separation of two instances (period) */ + u64 dl_bw; /* dl_runtime / dl_period */ + u64 dl_density; /* dl_runtime / dl_deadline */ + + /* + * Actual scheduling parameters. Initialized with the values above, + * they are continuously updated during task execution. Note that + * the remaining runtime could be < 0 in case we are in overrun. + */ + s64 runtime; /* Remaining runtime for this instance */ + u64 deadline; /* Absolute deadline for this instance */ + unsigned int flags; /* Specifying the scheduler behaviour */ + + /* + * Some bool flags: + * + * @dl_throttled tells if we exhausted the runtime. If so, the + * task has to wait for a replenishment to be performed at the + * next firing of dl_timer. 
+ * + * @dl_yielded tells if task gave up the CPU before consuming + * all its available runtime during the last job. + * + * @dl_non_contending tells if the task is inactive while still + * contributing to the active utilization. In other words, it + * indicates if the inactive timer has been armed and its handler + * has not been executed yet. This flag is useful to avoid race + * conditions between the inactive timer handler and the wakeup + * code. + * + * @dl_overrun tells if the task asked to be informed about runtime + * overruns. + * + * @dl_server tells if this is a server entity. + * + * @dl_defer tells if this is a deferred or regular server. For + * now only defer server exists. + * + * @dl_defer_armed tells if the deferrable server is waiting + * for the replenishment timer to activate it. + * + * @dl_server_active tells if the dlserver is active(started). + * dlserver is started on first cfs enqueue on an idle runqueue + * and is stopped when a dequeue results in 0 cfs tasks on the + * runqueue. In other words, dlserver is active only when cpu's + * runqueue has atleast one cfs task. + * + * @dl_defer_running tells if the deferrable server is actually + * running, skipping the defer phase. + */ + unsigned int dl_throttled : 1; + unsigned int dl_yielded : 1; + unsigned int dl_non_contending : 1; + unsigned int dl_overrun : 1; + unsigned int dl_server : 1; + unsigned int dl_server_active : 1; + unsigned int dl_defer : 1; + unsigned int dl_defer_armed : 1; + unsigned int dl_defer_running : 1; + + /* + * Bandwidth enforcement timer. Each -deadline task has its + * own bandwidth to be enforced, thus we need one timer per task. + */ + struct hrtimer dl_timer; + + /* + * Inactive timer, responsible for decreasing the active utilization + * at the "0-lag time". When a -deadline task blocks, it contributes + * to GRUB's active utilization until the "0-lag time", hence a + * timer is needed to decrease the active utilization at the correct + * time. + */ + struct hrtimer inactive_timer; + + /* + * Bits for DL-server functionality. Also see the comment near + * dl_server_update(). + * + * @rq the runqueue this server is for + * + * @server_has_tasks() returns true if @server_pick return a + * runnable task. + */ + struct rq *rq; + dl_server_has_tasks_f server_has_tasks; + dl_server_pick_f server_pick_task; + +#ifdef CONFIG_RT_MUTEXES + /* + * Priority Inheritance. When a DEADLINE scheduling entity is boosted + * pi_se points to the donor, otherwise points to the dl_se it belongs + * to (the original one/itself). + */ + struct sched_dl_entity *pi_se; +#endif +}; + +#ifdef CONFIG_UCLAMP_TASK +/* Number of utilization clamp buckets (shorter alias) */ +#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT + +/* + * Utilization clamp for a scheduling entity + * @value: clamp value "assigned" to a se + * @bucket_id: bucket index corresponding to the "assigned" value + * @active: the se is currently refcounted in a rq's bucket + * @user_defined: the requested clamp value comes from user-space + * + * The bucket_id is the index of the clamp bucket matching the clamp value + * which is pre-computed and stored to avoid expensive integer divisions f= rom + * the fast path. + * + * The active bit is set whenever a task has got an "effective" value assi= gned, + * which can be different from the clamp value "requested" from user-space. + * This allows to know a task is refcounted in the rq's bucket correspondi= ng + * to the "effective" bucket_id. 
+ * + * The user_defined bit is set whenever a task has got a task-specific cla= mp + * value requested from userspace, i.e. the system defaults apply to this = task + * just as a restriction. This allows to relax default clamps when a less + * restrictive task-specific value has been requested, thus allowing to + * implement a "nice" semantic. For example, a task running with a 20% + * default boost can still drop its own boosting to 0%. + */ +struct uclamp_se { + unsigned int value : bits_per(SCHED_CAPACITY_SCALE); + unsigned int bucket_id : bits_per(UCLAMP_BUCKETS); + unsigned int active : 1; + unsigned int user_defined : 1; +}; +#endif /* CONFIG_UCLAMP_TASK */ + +union rcu_special { + struct { + u8 blocked; + u8 need_qs; + u8 exp_hint; /* Hint for performance. */ + u8 need_mb; /* Readers need smp_mb(). */ + } b; /* Bits. */ + u32 s; /* Set of bits. */ +}; + +/* + * Number of contexts where an event can trigger: + * task, softirq, hardirq, nmi. + */ +#define PERF_NR_CONTEXTS 4 + +struct wake_q_node { + struct wake_q_node *next; +}; + +struct kmap_ctrl { +#ifdef CONFIG_KMAP_LOCAL + int idx; + pte_t pteval[KM_MAX_IDX]; +#endif +}; + +struct task_struct { +#ifdef CONFIG_THREAD_INFO_IN_TASK + /* + * For reasons of header soup (see current_thread_info()), this + * must be the first element of task_struct. + */ + struct thread_info thread_info; +#endif + unsigned int __state; + + /* saved state for "spinlock sleepers" */ + unsigned int saved_state; + + /* + * This begins the randomizable portion of task_struct. Only + * scheduling-critical items should be added above here. + */ + randomized_struct_fields_start + + void *stack; + refcount_t usage; + /* Per task flags (PF_*), defined further below: */ + unsigned int flags; + unsigned int ptrace; + +#ifdef CONFIG_MEM_ALLOC_PROFILING + struct alloc_tag *alloc_tag; +#endif + + int on_cpu; + struct __call_single_node wake_entry; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; + + /* + * recent_used_cpu is initially set as the last CPU used by a task + * that wakes affine another task. Waker/wakee relationships can + * push tasks around a CPU where each wakeup moves to the next one. + * Tracking a recently used CPU allows a quick search for a recently + * used CPU that may be idle. + */ + int recent_used_cpu; + int wake_cpu; + int on_rq; + + int prio; + int static_prio; + int normal_prio; + unsigned int rt_priority; + + struct sched_entity se; + struct sched_rt_entity rt; + struct sched_dl_entity dl; + struct sched_dl_entity *dl_server; +#ifdef CONFIG_SCHED_CLASS_EXT + struct sched_ext_entity scx; +#endif + const struct sched_class *sched_class; + +#ifdef CONFIG_SCHED_CORE + struct rb_node core_node; + unsigned long core_cookie; + unsigned int core_occupation; +#endif + +#ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; +#endif + + +#ifdef CONFIG_UCLAMP_TASK + /* + * Clamp values requested for a scheduling entity. + * Must be updated with task_rq_lock() held. + */ + struct uclamp_se uclamp_req[UCLAMP_CNT]; + /* + * Effective clamp values used for a scheduling entity. + * Must be updated with task_rq_lock() held. 
+	 */
+	struct uclamp_se		uclamp[UCLAMP_CNT];
+#endif
+
+	struct sched_statistics		stats;
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+	/* List of struct preempt_notifier: */
+	struct hlist_head		preempt_notifiers;
+#endif
+
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+	unsigned int			btrace_seq;
+#endif
+
+	unsigned int			policy;
+	unsigned long			max_allowed_capacity;
+	int				nr_cpus_allowed;
+	const cpumask_t			*cpus_ptr;
+	cpumask_t			*user_cpus_ptr;
+	cpumask_t			cpus_mask;
+	void				*migration_pending;
+	unsigned short			migration_disabled;
+	unsigned short			migration_flags;
+
+#ifdef CONFIG_PREEMPT_RCU
+	int				rcu_read_lock_nesting;
+	union rcu_special		rcu_read_unlock_special;
+	struct list_head		rcu_node_entry;
+	struct rcu_node			*rcu_blocked_node;
+#endif /* CONFIG_PREEMPT_RCU */
+
+#ifdef CONFIG_TASKS_RCU
+	unsigned long			rcu_tasks_nvcsw;
+	u8				rcu_tasks_holdout;
+	u8				rcu_tasks_idx;
+	int				rcu_tasks_idle_cpu;
+	struct list_head		rcu_tasks_holdout_list;
+	int				rcu_tasks_exit_cpu;
+	struct list_head		rcu_tasks_exit_list;
+#endif /* CONFIG_TASKS_RCU */
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+	int				trc_reader_nesting;
+	int				trc_ipi_to_cpu;
+	union rcu_special		trc_reader_special;
+	struct list_head		trc_holdout_list;
+	struct list_head		trc_blkd_node;
+	int				trc_blkd_cpu;
+#endif /* CONFIG_TASKS_TRACE_RCU */
+
+	struct sched_info		sched_info;
+
+	struct list_head		tasks;
+	struct plist_node		pushable_tasks;
+	struct rb_node			pushable_dl_tasks;
+
+	struct mm_struct		*mm;
+	struct mm_struct		*active_mm;
+	struct address_space		*faults_disabled_mapping;
+
+	int				exit_state;
+	int				exit_code;
+	int				exit_signal;
+	/* The signal sent when the parent dies: */
+	int				pdeath_signal;
+	/* JOBCTL_*, siglock protected: */
+	unsigned long			jobctl;
+
+	/* Used for emulating ABI behavior of previous Linux versions: */
+	unsigned int			personality;
+
+	/* Scheduler bits, serialized by scheduler locks: */
+	unsigned			sched_reset_on_fork:1;
+	unsigned			sched_contributes_to_load:1;
+	unsigned			sched_migrated:1;
+	unsigned			sched_task_hot:1;
+
+	/* Force alignment to the next boundary: */
+	unsigned			:0;
+
+	/* Unserialized, strictly 'current' */
+
+	/*
+	 * This field must not be in the scheduler word above due to wakelist
+	 * queueing no longer being serialized by p->on_cpu. However:
+	 *
+	 *   p->XXX = X;			ttwu()
+	 *   schedule()				  if (p->on_rq && ..) // false
+	 *   smp_mb__after_spinlock();		  if (smp_load_acquire(&p->on_cpu) && // true
+	 *   deactivate_task()			      ttwu_queue_wakelist())
+	 *     p->on_rq = 0;			        p->sched_remote_wakeup = Y;
+	 *
+	 * guarantees all stores of 'current' are visible before
+	 * ->sched_remote_wakeup gets used, so it can be in this word.
+	 */
+	unsigned			sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+	unsigned			sched_rt_mutex:1;
+#endif
+
+	/* Bit to tell TOMOYO we're in execve(): */
+	unsigned			in_execve:1;
+	unsigned			in_iowait:1;
+#ifndef TIF_RESTORE_SIGMASK
+	unsigned			restore_sigmask:1;
+#endif
+#ifdef CONFIG_MEMCG_V1
+	unsigned			in_user_fault:1;
+#endif
+#ifdef CONFIG_LRU_GEN
+	/* whether the LRU algorithm may apply to this access */
+	unsigned			in_lru_fault:1;
+#endif
+#ifdef CONFIG_COMPAT_BRK
+	unsigned			brk_randomized:1;
+#endif
+#ifdef CONFIG_CGROUPS
+	/* disallow userland-initiated cgroup migration */
+	unsigned			no_cgroup_migration:1;
+	/* task is frozen/stopped (used by the cgroup freezer) */
+	unsigned			frozen:1;
+#endif
+#ifdef CONFIG_BLK_CGROUP
+	unsigned			use_memdelay:1;
+#endif
+#ifdef CONFIG_PSI
+	/* Stalled due to lack of memory */
+	unsigned			in_memstall:1;
+#endif
+#ifdef CONFIG_PAGE_OWNER
+	/* Used by page_owner=on to detect recursion in page tracking. */
+	unsigned			in_page_owner:1;
+#endif
+#ifdef CONFIG_EVENTFD
+	/* Recursion prevention for eventfd_signal() */
+	unsigned			in_eventfd:1;
+#endif
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
+	unsigned			pasid_activated:1;
+#endif
+#ifdef CONFIG_X86_BUS_LOCK_DETECT
+	unsigned			reported_split_lock:1;
+#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+	/* delay due to memory thrashing */
+	unsigned			in_thrashing:1;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+	struct netdev_xmit		net_xmit;
+#endif
+	unsigned long			atomic_flags; /* Flags requiring atomic access. */
+
+	struct restart_block		restart_block;
+
+	pid_t				pid;
+	pid_t				tgid;
+
+#ifdef CONFIG_STACKPROTECTOR
+	/* Canary value for the -fstack-protector GCC feature: */
+	unsigned long			stack_canary;
+#endif
+	/*
+	 * Pointers to the (original) parent process, youngest child, younger sibling,
+	 * older sibling, respectively. (p->father can be replaced with
+	 * p->real_parent->pid)
+	 */
+
+	/* Real parent process: */
+	struct task_struct __rcu	*real_parent;
+
+	/* Recipient of SIGCHLD, wait4() reports: */
+	struct task_struct __rcu	*parent;
+
+	/*
+	 * Children/sibling form the list of natural children:
+	 */
+	struct list_head		children;
+	struct list_head		sibling;
+	struct task_struct		*group_leader;
+
+	/*
+	 * 'ptraced' is the list of tasks this task is using ptrace() on.
+	 *
+	 * This includes both natural children and PTRACE_ATTACH targets.
+	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
+	 */
+	struct list_head		ptraced;
+	struct list_head		ptrace_entry;
+
+	/* PID/PID hash table linkage. */
+	struct pid			*thread_pid;
+	struct hlist_node		pid_links[PIDTYPE_MAX];
+	struct list_head		thread_node;
+
+	struct completion		*vfork_done;
+
+	/* CLONE_CHILD_SETTID: */
+	int __user			*set_child_tid;
+
+	/* CLONE_CHILD_CLEARTID: */
+	int __user			*clear_child_tid;
+
+	/* PF_KTHREAD | PF_IO_WORKER */
+	void				*worker_private;
+
+	u64				utime;
+	u64				stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+	u64				utimescaled;
+	u64				stimescaled;
+#endif
+	u64				gtime;
+	struct prev_cputime		prev_cputime;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	struct vtime			vtime;
+#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+	atomic_t			tick_dep_mask;
+#endif
+	/* Context switch counts: */
+	unsigned long			nvcsw;
+	unsigned long			nivcsw;
+
+	/* Monotonic time in nsecs: */
+	u64				start_time;
+
+	/* Boot based time in nsecs: */
+	u64				start_boottime;
+
+	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
+	unsigned long			min_flt;
+	unsigned long			maj_flt;
+
+	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
+	struct posix_cputimers		posix_cputimers;
+
+#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+	struct posix_cputimers_work	posix_cputimers_work;
+#endif
+
+	/* Process credentials: */
+
+	/* Tracer's credentials at attach: */
+	const struct cred __rcu		*ptracer_cred;
+
+	/* Objective and real subjective task credentials (COW): */
+	const struct cred __rcu		*real_cred;
+
+	/* Effective (overridable) subjective task credentials (COW): */
+	const struct cred __rcu		*cred;
+
+#ifdef CONFIG_KEYS
+	/* Cached requested key. */
+	struct key			*cached_requested_key;
+#endif
+
+	/*
+	 * executable name, excluding path.
+	 *
+	 * - normally initialized by begin_new_exec()
+	 * - set it with set_task_comm()
+	 *   - strscpy_pad() to ensure it is always NUL-terminated and
+	 *     zero-padded
+	 *   - task_lock() to ensure the operation is atomic and the name is
+	 *     fully updated.
+	 */
+	char				comm[TASK_COMM_LEN];
+
+	struct nameidata		*nameidata;
+
+#ifdef CONFIG_SYSVIPC
+	struct sysv_sem			sysvsem;
+	struct sysv_shm			sysvshm;
+#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+	unsigned long			last_switch_count;
+	unsigned long			last_switch_time;
+#endif
+	/* Filesystem information: */
+	struct fs_struct		*fs;
+
+	/* Open file information: */
+	struct files_struct		*files;
+
+#ifdef CONFIG_IO_URING
+	struct io_uring_task		*io_uring;
+#endif
+
+	/* Namespaces: */
+	struct nsproxy			*nsproxy;
+
+	/* Signal handlers: */
+	struct signal_struct		*signal;
+	struct sighand_struct __rcu	*sighand;
+	sigset_t			blocked;
+	sigset_t			real_blocked;
+	/* Restored if set_restore_sigmask() was used: */
+	sigset_t			saved_sigmask;
+	struct sigpending		pending;
+	unsigned long			sas_ss_sp;
+	size_t				sas_ss_size;
+	unsigned int			sas_ss_flags;
+
+	struct callback_head		*task_works;
+
+#ifdef CONFIG_AUDIT
+#ifdef CONFIG_AUDITSYSCALL
+	struct audit_context		*audit_context;
+#endif
+	kuid_t				loginuid;
+	unsigned int			sessionid;
+#endif
+	struct seccomp			seccomp;
+	struct syscall_user_dispatch	syscall_dispatch;
+
+	/* Thread group tracking: */
+	u64				parent_exec_id;
+	u64				self_exec_id;
+
+	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
+	spinlock_t			alloc_lock;
+
+	/* Protection of the PI data structures: */
+	raw_spinlock_t			pi_lock;
+
+	struct wake_q_node		wake_q;
+
+#ifdef CONFIG_RT_MUTEXES
+	/* PI waiters blocked on a rt_mutex held by this task: */
+	struct rb_root_cached		pi_waiters;
+	/* Updated under owner's pi_lock and rq lock */
+	struct task_struct		*pi_top_task;
+	/* Deadlock detection and priority inheritance handling: */
+	struct rt_mutex_waiter		*pi_blocked_on;
+#endif
+
+#ifdef CONFIG_DEBUG_MUTEXES
+	/* Mutex deadlock detection: */
+	struct mutex_waiter		*blocked_on;
+#endif
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+	struct mutex			*blocker_mutex;
+#endif
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	int				non_block_count;
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	struct irqtrace_events		irqtrace;
+	unsigned int			hardirq_threaded;
+	u64				hardirq_chain_key;
+	int				softirqs_enabled;
+	int				softirq_context;
+	int				irq_config;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+	int				softirq_disable_cnt;
+#endif
+
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH			48UL
+	u64				curr_chain_key;
+	int				lockdep_depth;
+	unsigned int			lockdep_recursion;
+	struct held_lock		held_locks[MAX_LOCK_DEPTH];
+#endif
+
+#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
+	unsigned int			in_ubsan;
+#endif
+
+	/* Journalling filesystem info: */
+	void				*journal_info;
+
+	/* Stacked block device info: */
+	struct bio_list			*bio_list;
+
+	/* Stack plugging: */
+	struct blk_plug			*plug;
+
+	/* VM state: */
+	struct reclaim_state		*reclaim_state;
+
+	struct io_context		*io_context;
+
+#ifdef CONFIG_COMPACTION
+	struct capture_control		*capture_control;
+#endif
+	/* Ptrace state: */
+	unsigned long			ptrace_message;
+	kernel_siginfo_t		*last_siginfo;
+
+	struct task_io_accounting	ioac;
+#ifdef CONFIG_PSI
+	/* Pressure stall state */
+	unsigned int			psi_flags;
+#endif
+#ifdef CONFIG_TASK_XACCT
+	/* Accumulated RSS usage: */
+	u64				acct_rss_mem1;
+	/* Accumulated virtual memory usage: */
+	u64				acct_vm_mem1;
+	/* stime + utime since last update: */
+	u64				acct_timexpd;
+#endif
+#ifdef CONFIG_CPUSETS
+	/* Protected by ->alloc_lock: */
+	nodemask_t			mems_allowed;
+	/* Sequence number to catch updates: */
+	seqcount_spinlock_t		mems_allowed_seq;
+	int				cpuset_mem_spread_rotor;
+#endif
+#ifdef CONFIG_CGROUPS
+	/* Control Group info protected by css_set_lock: */
+	struct css_set __rcu		*cgroups;
+	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
+	struct list_head		cg_list;
+#endif
+#ifdef CONFIG_X86_CPU_RESCTRL
+	u32				closid;
+	u32				rmid;
+#endif
+#ifdef CONFIG_FUTEX
+	struct robust_list_head __user	*robust_list;
+#ifdef CONFIG_COMPAT
+	struct compat_robust_list_head __user *compat_robust_list;
+#endif
+	struct list_head		pi_state_list;
+	struct futex_pi_state		*pi_state_cache;
+	struct mutex			futex_exit_mutex;
+	unsigned int			futex_state;
+#endif
+#ifdef CONFIG_PERF_EVENTS
+	u8				perf_recursion[PERF_NR_CONTEXTS];
+	struct perf_event_context	*perf_event_ctxp;
+	struct mutex			perf_event_mutex;
+	struct list_head		perf_event_list;
+	struct perf_ctx_data __rcu	*perf_ctx_data;
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+	unsigned long			preempt_disable_ip;
+#endif
+#ifdef CONFIG_NUMA
+	/* Protected by alloc_lock: */
+	struct mempolicy		*mempolicy;
+	short				il_prev;
+	u8				il_weight;
+	short				pref_node_fork;
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	int				numa_scan_seq;
+	unsigned int			numa_scan_period;
+	unsigned int			numa_scan_period_max;
+	int				numa_preferred_nid;
+	unsigned long			numa_migrate_retry;
+	/* Migration stamp: */
+	u64				node_stamp;
+	u64				last_task_numa_placement;
+	u64				last_sum_exec_runtime;
+	struct callback_head		numa_work;
+
+	/*
+	 * This pointer is only modified for current in syscall and
+	 * pagefault context (and for tasks being destroyed), so it can be read
+	 * from any of the following contexts:
+	 *  - RCU read-side critical section
+	 *  - current->numa_group from everywhere
+	 *  - task's runqueue locked, task not running
+	 */
+	struct numa_group __rcu		*numa_group;
+
+	/*
+	 * numa_faults is an array split into four regions:
+	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
+	 * in this precise order.
+	 *
+	 * faults_memory: Exponential decaying average of faults on a per-node
+	 * basis. Scheduling placement decisions are made based on these
+	 * counts. The values remain static for the duration of a PTE scan.
+	 * faults_cpu: Track the nodes the process was running on when a NUMA
+	 * hinting fault was incurred.
+	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
+	 * during the current scan window. When the scan completes, the counts
+	 * in faults_memory and faults_cpu decay and these values are copied.
+	 */
+	unsigned long			*numa_faults;
+	unsigned long			total_numa_faults;
+
+	/*
+	 * numa_faults_locality tracks if faults recorded during the last
+	 * scan window were remote/local or failed to migrate. The task scan
+	 * period is adapted based on the locality of the faults with different
+	 * weights depending on whether they were shared or private faults.
+	 */
+	unsigned long			numa_faults_locality[3];
+
+	unsigned long			numa_pages_migrated;
+#endif /* CONFIG_NUMA_BALANCING */
+
+#ifdef CONFIG_RSEQ
+	struct rseq __user		*rseq;
+	u32				rseq_len;
+	u32				rseq_sig;
+	/*
+	 * RmW on rseq_event_mask must be performed atomically
+	 * with respect to preemption.
+	 */
+	unsigned long			rseq_event_mask;
+# ifdef CONFIG_DEBUG_RSEQ
+	/*
+	 * This is a placeholder to save a copy of the rseq fields for
+	 * validation of read-only fields. The struct rseq has a
+	 * variable-length array at the end, so it cannot be used
+	 * directly. Reserve a size large enough for the known fields.
+	 */
+	char				rseq_fields[sizeof(struct rseq)];
+# endif
+#endif
+
+#ifdef CONFIG_SCHED_MM_CID
+	int				mm_cid;		/* Current cid in mm */
+	int				last_mm_cid;	/* Most recent cid in mm */
+	int				migrate_from_cpu;
+	int				mm_cid_active;	/* Whether cid bitmap is active */
+	struct callback_head		cid_work;
+#endif
+
+	struct tlbflush_unmap_batch	tlb_ubc;
+
+	/* Cache last used pipe for splice(): */
+	struct pipe_inode_info		*splice_pipe;
+
+	struct page_frag		task_frag;
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+	struct task_delay_info		*delays;
+#endif
+
+#ifdef CONFIG_FAULT_INJECTION
+	int				make_it_fail;
+	unsigned int			fail_nth;
+#endif
+	/*
+	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
+	 * balance_dirty_pages() for a dirty throttling pause:
+	 */
+	int				nr_dirtied;
+	int				nr_dirtied_pause;
+	/* Start of a write-and-pause period: */
+	unsigned long			dirty_paused_when;
+
+#ifdef CONFIG_LATENCYTOP
+	int				latency_record_count;
+	struct latency_record		latency_record[LT_SAVECOUNT];
+#endif
+	/*
+	 * Time slack values; these are used to round up poll() and
+	 * select() etc. timeout values. These are in nanoseconds.
+	 */
+	u64				timer_slack_ns;
+	u64				default_timer_slack_ns;
+
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+	unsigned int			kasan_depth;
+#endif
+
+#ifdef CONFIG_KCSAN
+	struct kcsan_ctx		kcsan_ctx;
+#ifdef CONFIG_TRACE_IRQFLAGS
+	struct irqtrace_events		kcsan_save_irqtrace;
+#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+	int				kcsan_stack_depth;
+#endif
+#endif
+
+#ifdef CONFIG_KMSAN
+	struct kmsan_ctx		kmsan_ctx;
+#endif
+
+#if IS_ENABLED(CONFIG_KUNIT)
+	struct kunit			*kunit_test;
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored address in ret_stack: */
+	int				curr_ret_stack;
+	int				curr_ret_depth;
+
+	/* Stack of return addresses for return function tracing: */
+	unsigned long			*ret_stack;
+
+	/* Timestamp for last schedule: */
+	unsigned long long		ftrace_timestamp;
+	unsigned long long		ftrace_sleeptime;
+
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun:
+	 */
+	atomic_t			trace_overrun;
+
+	/* Pause tracing: */
+	atomic_t			tracing_graph_pause;
+#endif
+
+#ifdef CONFIG_TRACING
+	/* Bitmask and counter of trace recursion: */
+	unsigned long			trace_recursion;
+#endif /* CONFIG_TRACING */
+
+#ifdef CONFIG_KCOV
+	/* See kernel/kcov.c for more details. */
+
+	/* Coverage collection mode enabled for this task (0 if disabled): */
+	unsigned int			kcov_mode;
+
+	/* Size of the kcov_area: */
+	unsigned int			kcov_size;
+
+	/* Buffer for coverage collection: */
+	void				*kcov_area;
+
+	/* KCOV descriptor wired with this task or NULL: */
+	struct kcov			*kcov;
+
+	/* KCOV common handle for remote coverage collection: */
+	u64				kcov_handle;
+
+	/* KCOV sequence number: */
+	int				kcov_sequence;
+
+	/* Collect coverage from softirq context: */
+	unsigned int			kcov_softirq;
+#endif
+
+#ifdef CONFIG_MEMCG_V1
+	struct mem_cgroup		*memcg_in_oom;
+#endif
+
+#ifdef CONFIG_MEMCG
+	/* Number of pages to reclaim on returning to userland: */
+	unsigned int			memcg_nr_pages_over_high;
+
+	/* Used by memcontrol for targeted memcg charge: */
+	struct mem_cgroup		*active_memcg;
+
+	/* Cache for current->cgroups->memcg->objcg lookups: */
+	struct obj_cgroup		*objcg;
+#endif
+
+#ifdef CONFIG_BLK_CGROUP
+	struct gendisk			*throttle_disk;
+#endif
+
+#ifdef CONFIG_UPROBES
+	struct uprobe_task		*utask;
+#endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+	unsigned int			sequential_io;
+	unsigned int			sequential_io_avg;
+#endif
+	struct kmap_ctrl		kmap_ctrl;
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	unsigned long			task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+	unsigned long			saved_state_change;
+# endif
+#endif
+	struct rcu_head			rcu;
+	refcount_t			rcu_users;
+	int				pagefault_disabled;
+#ifdef CONFIG_MMU
+	struct task_struct		*oom_reaper_list;
+	struct timer_list		oom_reaper_timer;
+#endif
+#ifdef CONFIG_VMAP_STACK
+	struct vm_struct		*stack_vm_area;
+#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+	/* A live task holds one reference: */
+	refcount_t			stack_refcount;
+#endif
+#ifdef CONFIG_LIVEPATCH
+	int				patch_state;
+#endif
+#ifdef CONFIG_SECURITY
+	/* Used by LSM modules for access restriction: */
+	void				*security;
+#endif
+#ifdef CONFIG_BPF_SYSCALL
+	/* Used by BPF task local storage */
+	struct bpf_local_storage __rcu	*bpf_storage;
+	/* Used for BPF run context */
+	struct bpf_run_ctx		*bpf_ctx;
+#endif
+	/* Used by BPF for per-TASK xdp storage */
+	struct bpf_net_context		*bpf_net_context;
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+	unsigned long			lowest_stack;
+	unsigned long			prev_lowest_stack;
+#endif
+
+#ifdef CONFIG_X86_MCE
+	void __user			*mce_vaddr;
+	__u64				mce_kflags;
+	u64				mce_addr;
+	__u64				mce_ripv : 1,
+					mce_whole_page : 1,
+					__mce_reserved : 62;
+	struct callback_head		mce_kill_me;
+	int				mce_count;
+#endif
+
+#ifdef CONFIG_KRETPROBES
+	struct llist_head		kretprobe_instances;
+#endif
+#ifdef CONFIG_RETHOOK
+	struct llist_head		rethooks;
+#endif
+
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
+	/*
+	 * If L1D flush is supported on mm context switch then we use this
+	 * callback head to queue kill work to kill tasks that are not
+	 * running on SMT disabled cores.
+	 */
+	struct callback_head		l1d_flush_kill;
+#endif
+
+#ifdef CONFIG_RV
+	/*
+	 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
+	 * If we find justification for more monitors, we can think
+	 * about adding more or developing a dynamic method. So far,
+	 * none of these are justified.
+	 */
+	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
+#endif
+
+#ifdef CONFIG_USER_EVENTS
+	struct user_event_mm		*user_event_mm;
+#endif
+
+	/* CPU-specific state of this task: */
+	struct thread_struct		thread;
+
+	/*
+	 * New fields for task_struct should be added above here, so that
+	 * they are included in the randomized portion of task_struct.
+	 */
+	randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
+
 #endif /* _LINUX_SCHED_TYPES_H */
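Two illustrative sketches follow; neither is part of the patch above.

First, the uclamp_se layout stores both the clamp value and its bucket_id
precisely so that the value-to-bucket division happens once, when a clamp
is assigned, and never in the enqueue/dequeue fast path. A minimal
standalone sketch of how such a mapping could look, assuming UCLAMP_BUCKETS
evenly partitions [0, SCHED_CAPACITY_SCALE]; UCLAMP_BUCKET_DELTA and
uclamp_bucket_id() are illustrative names here, not taken from this patch:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_BUCKETS		5	/* stand-in for CONFIG_UCLAMP_BUCKETS_COUNT */
#define UCLAMP_BUCKET_DELTA	(SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS)

static unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	/* The only division; runs at assignment time, not in the fast path. */
	unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;

	/* clamp_value == SCHED_CAPACITY_SCALE must map into the last bucket. */
	return id < UCLAMP_BUCKETS ? id : UCLAMP_BUCKETS - 1;
}

int main(void)
{
	unsigned int value = 205;	/* a clamp of roughly 20% of capacity */
	unsigned int bucket_id = uclamp_bucket_id(value);

	/* Fast-path code would reuse the cached bucket_id field directly. */
	printf("value %u -> bucket %u of %u\n", value, bucket_id, UCLAMP_BUCKETS);
	return 0;
}

Second, the b/s pairing in union rcu_special lets individual flags be
updated with byte-sized stores while the whole set can be tested or
cleared as one 32-bit word. A sketch of that access pattern, with
standard-C stand-ins for the kernel's u8/u32 types:

#include <stdint.h>

union rcu_special_sketch {
	struct {
		uint8_t blocked;
		uint8_t need_qs;
		uint8_t exp_hint;
		uint8_t need_mb;
	} b;		/* individual flags, each a separate byte */
	uint32_t s;	/* all four flags viewed as one word */
};

/* Usage:
 *	union rcu_special_sketch rs = { .s = 0 };
 *	rs.b.need_qs = 1;	// set one flag with a byte store
 *	if (rs.s != 0)		// test every flag with a single word load
 *		rs.s = 0;	// clear every flag with a single word store
 */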