From nobody Mon May 11 05:34:48 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B0472C433F5 for ; Wed, 13 Apr 2022 13:31:10 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235770AbiDMNd2 (ORCPT ); Wed, 13 Apr 2022 09:33:28 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42104 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235761AbiDMNdZ (ORCPT ); Wed, 13 Apr 2022 09:33:25 -0400 Received: from galois.linutronix.de (Galois.linutronix.de [193.142.43.55]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id DE1C45E765 for ; Wed, 13 Apr 2022 06:31:03 -0700 (PDT) Message-ID: <20220413133024.249118058@linutronix.de> DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020; t=1649856662; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=69AEwFG9KN7E8YNtjZ/V9qyJTOeoNdNZpvhNkDaqw4I=; b=yFR2IECGpJx6Rv//CloyWMdYnZ0Mi5YoRty0BQ1EapXLz/eAGzU+JTDBe7AAjYYSvrc61B 3PH/NonMi5AMn3r+7EFc6tjzwYMDpS3n/iy8xsCTo3wAD1D7UicPUbGCMCn29+heCzsCzQ l58NVwEcXk0HYK4JcspGJf1oKJ/J9cwxghmG5rjWrX7NgbMSeW/X2uj4uTqfzewxdfc87Z UaShlNPbL6IeVPYFEzTxfjBrlRcQIcDe/nwq8baKAg20ZEJOw2D5zNzHpKGNkGiuGyxOHh tIiVfqgy/QKobe81FEPFScwJY0VCor4EYqjWLTcOnI/Tfw2WF5L9ZKUHiU8jdg== DKIM-Signature: v=1; a=ed25519-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020e; t=1649856662; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=69AEwFG9KN7E8YNtjZ/V9qyJTOeoNdNZpvhNkDaqw4I=; b=Q+0vE2QoWFyjrJI/oLw/Gdo3nulfE9wFqeV5eNh5ZpuysHpDNZN3u3RPWQi2aUAByRgxS+ UEs9y1hmYmamMUDQ== From: Thomas Gleixner To: LKML Cc: Christoph 
Hellwig , Peter Zijlstra , Sebastian Andrzej Siewior , kernel test robot Subject: [patch V5 1/3] sched: Fix missing prototype warnings References: <20220413132836.099363044@linutronix.de> MIME-Version: 1.0 Date: Wed, 13 Apr 2022 15:31:02 +0200 (CEST) Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" A W=3D1 build emits more than a dozen missing prototype warnings related to scheduler and scheduler specific includes. Reported-by: kernel test robot Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) --- V5: New patch --- include/linux/sched.h | 2 ++ kernel/sched/build_policy.c | 2 ++ kernel/sched/build_utility.c | 1 + kernel/sched/core.c | 3 +++ kernel/sched/deadline.c | 2 -- kernel/sched/fair.c | 1 + kernel/sched/sched.h | 8 ++------ kernel/sched/smp.h | 6 ++++++ kernel/stop_machine.c | 2 -- 9 files changed, 17 insertions(+), 10 deletions(-) --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2361,4 +2361,6 @@ static inline void sched_core_free(struc static inline void sched_core_fork(struct task_struct *p) { } #endif =20 +extern void sched_set_stop_task(int cpu, struct task_struct *stop); + #endif --- a/kernel/sched/build_policy.c +++ b/kernel/sched/build_policy.c @@ -15,6 +15,7 @@ /* Headers: */ #include #include +#include #include #include =20 @@ -31,6 +32,7 @@ #include =20 #include "sched.h" +#include "smp.h" =20 #include "autogroup.h" #include "stats.h" --- a/kernel/sched/build_utility.c +++ b/kernel/sched/build_utility.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -26,7 +26,10 @@ #include #include #include +#include #include +#include +#include #include #include #include --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1220,8 +1220,6 @@ int dl_runtime_exceeded(struct sched_dl_ return (dl_se->runtime <=3D 0); } =20 -extern bool 
sched_rt_bandwidth_account(struct rt_rq *rt_rq); - /* * This function implements the GRUB accounting rule: * according to the GRUB reclaiming algorithm, the runtime is --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -36,6 +36,7 @@ #include #include #include +#include =20 #include #include --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1827,12 +1827,7 @@ static inline void dirty_sched_domain_sy #endif =20 extern int sched_update_scaling(void); - -extern void flush_smp_call_function_from_idle(void); - -#else /* !CONFIG_SMP: */ -static inline void flush_smp_call_function_from_idle(void) { } -#endif +#endif /* CONFIG_SMP */ =20 #include "stats.h" =20 @@ -2309,6 +2304,7 @@ extern void resched_cpu(int cpu); =20 extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 r= untime); +extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); =20 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 r= untime); extern void init_dl_task_timer(struct sched_dl_entity *dl_se); --- a/kernel/sched/smp.h +++ b/kernel/sched/smp.h @@ -7,3 +7,9 @@ extern void sched_ttwu_pending(void *arg); =20 extern void send_call_function_single_ipi(int cpu); + +#ifdef CONFIG_SMP +extern void flush_smp_call_function_from_idle(void); +#else +static inline void flush_smp_call_function_from_idle(void) { } +#endif --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -535,8 +535,6 @@ void stop_machine_park(int cpu) kthread_park(stopper->thread); } =20 -extern void sched_set_stop_task(int cpu, struct task_struct *stop); - static void cpu_stop_create(unsigned int cpu) { sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu)); From nobody Mon May 11 05:34:48 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 36274C433EF 
for ; Wed, 13 Apr 2022 13:31:14 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235777AbiDMNdd (ORCPT ); Wed, 13 Apr 2022 09:33:33 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42176 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235765AbiDMNd0 (ORCPT ); Wed, 13 Apr 2022 09:33:26 -0400 Received: from galois.linutronix.de (Galois.linutronix.de [193.142.43.55]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 7DC72546A2 for ; Wed, 13 Apr 2022 06:31:05 -0700 (PDT) Message-ID: <20220413133024.305001096@linutronix.de> DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020; t=1649856664; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=g9e7k+CdFzSWcwYyvuM4WnmOYrbZsluI5FvW598pwQI=; b=WuHAhMpVp/wRdZax9ANdLw5blo5l1bKzyNuUatODEwUxlfVaVaOlzhFulApKQpjGezM4yR b8F300YrsiC/0rLlbhpKRDrfCVQ5AhOL5EMuGh2gpsbAEH74gWYCwWxB7RGxNUnvslRNaD bk35szds/l/htmdzgMsPvhAZAGoozoiXMqn5H9ns7+etGPfOPgnE8xedJ2tNPPaqd745h6 njYa57whj4QSTK32UNvWRel3e2gFwJtsNztp2y3Z0F+AIjXbJggPIDsunP4PFvvAju5cYV vt9tsvh9A7mTEemG1vsVDlO2GB0j8flbybIB7qbh0i0Srf4bd408gvx2eFYsrQ== DKIM-Signature: v=1; a=ed25519-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020e; t=1649856664; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=g9e7k+CdFzSWcwYyvuM4WnmOYrbZsluI5FvW598pwQI=; b=/BbWiysCvtKDSCw+FKCVMF5LN//Jtjw0N6Qw1v+gzHTnziwTCPf2Q2GXW5oU/tWu1fM3qt rHqFUHVBmZ1bgcCQ== From: Thomas Gleixner To: LKML Cc: Christoph Hellwig , Peter Zijlstra , Sebastian Andrzej Siewior Subject: [patch V5 2/3] smp: Rename flush_smp_call_function_from_idle() References: <20220413132836.099363044@linutronix.de> MIME-Version: 1.0 Date: Wed, 13 Apr 2022 15:31:03 +0200 (CEST) Precedence: bulk List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" This is invoked from the stopper thread too, which is definitely not idle. Rename it to flush_smp_call_function_queue() and fixup the callers. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra (Intel) --- V4: New patch --- kernel/sched/core.c | 2 +- kernel/sched/idle.c | 2 +- kernel/sched/smp.h | 4 ++-- kernel/smp.c | 27 ++++++++++++++++++++------- 4 files changed, 24 insertions(+), 11 deletions(-) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2411,7 +2411,7 @@ static int migration_cpu_stop(void *data * __migrate_task() such that we will not miss enforcing cpus_ptr * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. */ - flush_smp_call_function_from_idle(); + flush_smp_call_function_queue(); =20 raw_spin_lock(&p->pi_lock); rq_lock(rq, &rf); --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -327,7 +327,7 @@ static void do_idle(void) * RCU relies on this call to be done outside of an RCU read-side * critical section. 
*/ - flush_smp_call_function_from_idle(); + flush_smp_call_function_queue(); schedule_idle(); =20 if (unlikely(klp_patch_pending(current))) --- a/kernel/sched/smp.h +++ b/kernel/sched/smp.h @@ -9,7 +9,7 @@ extern void sched_ttwu_pending(void *arg extern void send_call_function_single_ipi(int cpu); =20 #ifdef CONFIG_SMP -extern void flush_smp_call_function_from_idle(void); +extern void flush_smp_call_function_queue(void); #else -static inline void flush_smp_call_function_from_idle(void) { } +static inline void flush_smp_call_function_queue(void) { } #endif --- a/kernel/smp.c +++ b/kernel/smp.c @@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct cal =20 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); =20 -static void flush_smp_call_function_queue(bool warn_cpu_offline); +static void __flush_smp_call_function_queue(bool warn_cpu_offline); =20 int smpcfd_prepare_cpu(unsigned int cpu) { @@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu) * ensure that the outgoing CPU doesn't go offline with work * still pending. */ - flush_smp_call_function_queue(false); + __flush_smp_call_function_queue(false); irq_work_run(); return 0; } @@ -541,11 +541,11 @@ void generic_smp_call_function_single_in { cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU, smp_processor_id(), CFD_SEQ_GOTIPI); - flush_smp_call_function_queue(true); + __flush_smp_call_function_queue(true); } =20 /** - * flush_smp_call_function_queue - Flush pending smp-call-function callbac= ks + * __flush_smp_call_function_queue - Flush pending smp-call-function callb= acks * * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an * offline CPU. Skip this check if set to 'false'. @@ -558,7 +558,7 @@ void generic_smp_call_function_single_in * Loop through the call_single_queue and run all the queued callbacks. * Must be called with interrupts disabled. 
*/ -static void flush_smp_call_function_queue(bool warn_cpu_offline) +static void __flush_smp_call_function_queue(bool warn_cpu_offline) { call_single_data_t *csd, *csd_next; struct llist_node *entry, *prev; @@ -681,7 +681,20 @@ static void flush_smp_call_function_queu smp_processor_id(), CFD_SEQ_HDLEND); } =20 -void flush_smp_call_function_from_idle(void) + +/** + * flush_smp_call_function_queue - Flush pending smp-call-function callbac= ks + * from task context (idle, migration thread) + * + * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it + * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by + * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to + * handle queued SMP function calls before scheduling. + * + * The migration thread has to ensure that an eventually pending wakeup has + * been handled before it migrates a task. + */ +void flush_smp_call_function_queue(void) { unsigned long flags; =20 @@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(v cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU, smp_processor_id(), CFD_SEQ_IDLE); local_irq_save(flags); - flush_smp_call_function_queue(true); + __flush_smp_call_function_queue(true); if (local_softirq_pending()) do_softirq(); From nobody Mon May 11 05:34:48 2026 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 12FD2C433F5 for ; Wed, 13 Apr 2022 13:31:18 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S235787AbiDMNdh (ORCPT ); Wed, 13 Apr 2022 09:33:37 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:42298 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S235772AbiDMNd3 (ORCPT ); Wed, 13 Apr 2022 09:33:29 -0400 Received: from galois.linutronix.de 
(Galois.linutronix.de [IPv6:2a0a:51c0:0:12e:550::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 260145D5E7 for ; Wed, 13 Apr 2022 06:31:07 -0700 (PDT) Message-ID: <20220413133024.356509586@linutronix.de> DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020; t=1649856665; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=hndER118dmw7kYMFXyYuvsxJ9ycP9/DqyDamURXJ4cA=; b=nToUJbUjDp19tCC34fQsnLD80e9uEBpO6zMUv2CRiM2HdiUcrw6ShlgSs0RR8zYhBtZM7I XoTBDDcW2ZjEI94wQCnCc3V2oksdyEOz6lSaLbQJbRyhPEuuBDsDySfaDJtXA0r+89HD6P 8AHc51vIafk+7BjKQAdtV+lFhwP6BdrkvdEsq8CwdAxgRDEBJWQ5GKIKAZhLyD4hMS0pTP wTcanuYI8LKoYa5yHp6avlOz/TNQ90Z6i1OiMC5HAOWauJONZeirom5YaIBt6Dyyse9Gcj CofXuJjtUlvaUyGI9ljjspjT/7mWAIE/zYYrTNVc3QqhNbmHXsa1nCgIq9ncRA== DKIM-Signature: v=1; a=ed25519-sha256; c=relaxed/relaxed; d=linutronix.de; s=2020e; t=1649856665; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version:content-type:content-type: references:references; bh=hndER118dmw7kYMFXyYuvsxJ9ycP9/DqyDamURXJ4cA=; b=mLHAnQn7pfVUUdyiJeOfFeqs6VtT74Sa7alLmxpMHxKolNUfcSJeQWITHXz1Q3c8Ommqha WD+dTyHgejoZYgCw== From: Thomas Gleixner To: LKML Cc: Christoph Hellwig , Peter Zijlstra , Sebastian Andrzej Siewior Subject: [patch V5 3/3] smp: Make softirq handling RT safe in flush_smp_call_function_queue() References: <20220413132836.099363044@linutronix.de> MIME-Version: 1.0 Date: Wed, 13 Apr 2022 15:31:05 +0200 (CEST) Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Sebastian Andrzej Siewior flush_smp_call_function_queue() invokes do_softirq() which is not available on PREEMPT_RT. flush_smp_call_function_queue() is invoked from the idle task and the migration task with preemption or interrupts disabled. 
So RT kernels cannot process soft interrupts in that context as that has to acquire 'sleeping spinlocks' which is not possible with preemption or interrupts disabled and forbidden from the idle task anyway. The currently known SMP function call which raises a soft interrupt is in the block layer, but this functionality is not enabled on RT kernels due to latency and performance reasons. RT could wake up ksoftirqd unconditionally, but this wants to be avoided if there were soft interrupts pending already when this is invoked in the context of the migration task. The migration task might have preempted a threaded interrupt handler which raised a soft interrupt, but did not reach the local_bh_enable() to process it. The "running" ksoftirqd might prevent the handling in the interrupt thread context which is causing latency issues. Add a new function which handles this case explicitly for RT and falls back to do_softirq() on !RT kernels. In the RT case this warns when one of the flushed SMP function calls raised a soft interrupt so this can be investigated. [ tglx: Moved the RT part out of SMP code ] Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/YgKgL6aPj8aBES6G@linutronix.de Acked-by: Peter Zijlstra (Intel) --- v4: - Move the RT logic into softirq.c which also avoids the wakeup when soft interrupts are disabled. The enable will handle them anyway. v3: - Only wake ksoftirqd if the softirqs were raised within flush_smp_call_function_queue(). - Add a warning in the wake case. v2: Drop an empty line. 
include/linux/interrupt.h | 9 +++++++++ kernel/smp.c | 5 ++++- kernel/softirq.c | 13 +++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) --- --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -607,6 +607,15 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); =20 +#ifdef CONFIG_PREEMPT_RT +extern void do_softirq_post_smp_call_flush(unsigned int was_pending); +#else +static inline void do_softirq_post_smp_call_flush(unsigned int unused) +{ + do_softirq(); +} +#endif + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); --- a/kernel/smp.c +++ b/kernel/smp.c @@ -696,6 +696,7 @@ static void __flush_smp_call_function_qu */ void flush_smp_call_function_queue(void) { + unsigned int was_pending; unsigned long flags; =20 if (llist_empty(this_cpu_ptr(&call_single_queue))) @@ -704,9 +705,11 @@ void flush_smp_call_function_queue(void) cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU, smp_processor_id(), CFD_SEQ_IDLE); local_irq_save(flags); + /* Get the already pending soft interrupts for RT enabled kernels */ + was_pending =3D local_softirq_pending(); __flush_smp_call_function_queue(true); if (local_softirq_pending()) - do_softirq(); + do_softirq_post_smp_call_flush(was_pending); =20 local_irq_restore(flags); } --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -294,6 +294,19 @@ static inline void invoke_softirq(void) wakeup_softirqd(); } =20 +/* + * flush_smp_call_function_queue() can raise a soft interrupt in a function + * call. On RT kernels this is undesired and the only known functionality + * in the block layer which does this is disabled on RT. If soft interrupts + * get raised which haven't been raised before the flush, warn so it can be + * investigated. 
+ */ +void do_softirq_post_smp_call_flush(unsigned int was_pending) +{ + if (WARN_ON_ONCE(was_pending !=3D local_softirq_pending())) + invoke_softirq(); +} + #else /* CONFIG_PREEMPT_RT */ =20 /*