From: Steve Sistare <steven.sistare@oracle.com>
An overloaded CPU has more than one runnable task. When a CFS task wakes
on a CPU, if h_nr_runnable transitions from 1 to 2 or more, then set the CPU in
the cfs_overload_cpus bitmap. When a CFS task sleeps, if h_nr_runnable
transitions from 2 to 1 or fewer, then clear the CPU in cfs_overload_cpus.
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Chen Jinghuang <chenjinghuang2@huawei.com>
---
v5: Rename h_nr_running to h_nr_runnable and reposition
overload_set/overload_clear to fix overload detection for delay dequeue.
v4: Detect CPU overload via changes in h_nr_running.
---
kernel/sched/fair.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eea99ec01a3f..92c3bcff5b6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -55,6 +55,7 @@
#include <uapi/linux/sched/types.h>
#include "sched.h"
+#include "sparsemask.h"
#include "stats.h"
#include "autogroup.h"
@@ -5076,6 +5077,33 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
+#ifdef CONFIG_SMP
+static void overload_clear(struct rq *rq)
+{
+ struct sparsemask *overload_cpus;
+
+ rcu_read_lock();
+ overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+ if (overload_cpus)
+ sparsemask_clear_elem(overload_cpus, rq->cpu);
+ rcu_read_unlock();
+}
+
+static void overload_set(struct rq *rq)
+{
+ struct sparsemask *overload_cpus;
+
+ rcu_read_lock();
+ overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+ if (overload_cpus)
+ sparsemask_set_elem(overload_cpus, rq->cpu);
+ rcu_read_unlock();
+}
+#else /* CONFIG_SMP */
+static inline void overload_clear(struct rq *rq) {}
+static inline void overload_set(struct rq *rq) {}
+#endif
+
void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
{
struct sched_entity *se = &p->se;
@@ -5955,6 +5983,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (!dequeue)
return false; /* Throttle no longer required. */
+
/* freeze hierarchy runnable averages while throttled */
rcu_read_lock();
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
@@ -6875,6 +6904,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
int h_nr_idle = task_has_idle_policy(p);
int h_nr_runnable = 1;
int task_new = !(flags & ENQUEUE_WAKEUP);
+ unsigned int prev_nr = rq->cfs.h_nr_runnable;
int rq_h_nr_queued = rq->cfs.h_nr_queued;
u64 slice = 0;
@@ -6892,6 +6922,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (flags & ENQUEUE_DELAYED) {
requeue_delayed_entity(se);
+
+ if (prev_nr <= 1 && rq->cfs.h_nr_runnable >= 2)
+ overload_set(rq);
+
return;
}
@@ -6961,6 +6995,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
/* At this point se is NULL and we are at root level*/
add_nr_running(rq, 1);
+ if (prev_nr <= 1 && rq->cfs.h_nr_runnable >= 2)
+ overload_set(rq);
/*
* Since new tasks are assigned an initial util_avg equal to
@@ -7003,6 +7039,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
int h_nr_idle = 0;
int h_nr_queued = 0;
int h_nr_runnable = 0;
+ unsigned int prev_nr = rq->cfs.h_nr_runnable;
struct cfs_rq *cfs_rq;
u64 slice = 0;
@@ -7018,8 +7055,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
cfs_rq = cfs_rq_of(se);
if (!dequeue_entity(cfs_rq, se, flags)) {
- if (p && &p->se == se)
+ if (p && &p->se == se) {
+ if (prev_nr >= 2 && rq->cfs.h_nr_runnable <= 1)
+ overload_clear(rq);
+
return -1;
+ }
slice = cfs_rq_min_slice(cfs_rq);
break;
@@ -7077,6 +7118,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
}
sub_nr_running(rq, h_nr_queued);
+ if (prev_nr >= 2 && rq->cfs.h_nr_runnable <= 1)
+ overload_clear(rq);
/* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
--
2.34.1
Hello,
kernel test robot noticed "UBSAN:array-index-out-of-bounds_in_kernel/sched/sparsemask.h" on:
commit: 7c86475c501e5d551136d0c2c8702d7c61a6b73f ("[RFC PATCH v5 4/9] sched/fair: Dynamically update cfs_overload_cpus")
url: https://github.com/intel-lab-lkp/linux/commits/Chen-Jinghuang/sched-Provide-sparsemask-a-reduced-contention-bitmap/20260321-084706
base: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git 42bddab0563fe67882b2722620a66dd98c8dbf33
patch link: https://lore.kernel.org/all/20260320055920.2518389-5-chenjinghuang2@huawei.com/
patch subject: [RFC PATCH v5 4/9] sched/fair: Dynamically update cfs_overload_cpus
in testcase: boot
config: x86_64-randconfig-016-20260323
compiler: clang-20
test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 32G
(please refer to attached dmesg/kmsg for entire log/backtrace)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Closes: https://lore.kernel.org/oe-lkp/202603242133.f66e336f-lkp@intel.com
[ 3.884372][ T1] ------------[ cut here ]------------
[ 3.884372][ T1] UBSAN: array-index-out-of-bounds in kernel/sched/sparsemask.h:181:32
[ 3.884372][ T1] index 0 is out of range for type 'struct sparsemask_chunk[0]'
[ 3.884372][ T1] CPU: 1 UID: 0 PID: 1 Comm: swapper/0 Not tainted 7.0.0-rc4-00274-g7c86475c501e #1 PREEMPT 3f169c7b099bc50b6180dc864130cea2b43f07ba
[ 3.884372][ T1] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
[ 3.884372][ T1] Call Trace:
[ 3.884372][ T1] <TASK>
[ 3.884372][ T1] __dump_stack (lib/dump_stack.c:95)
[ 3.884372][ T1] dump_stack_lvl (lib/dump_stack.c:123 (discriminator 1))
[ 3.884372][ T1] dump_stack (lib/dump_stack.c:130)
[ 3.884372][ T1] ubsan_epilogue (lib/ubsan.c:234 (discriminator 2))
[ 3.884372][ T1] __ubsan_handle_out_of_bounds (lib/ubsan.c:?)
[ 3.884372][ T1] ? overload_set (include/linux/rcupdate.h:312 (discriminator 1) include/linux/rcupdate.h:850 (discriminator 1) kernel/sched/fair.c:5164 (discriminator 1))
[ 3.884372][ T1] overload_set (kernel/sched/sparsemask.h:181 (discriminator 4) kernel/sched/fair.c:5167 (discriminator 4))
[ 3.884372][ T1] ? overload_set (include/linux/rcupdate.h:312 (discriminator 1) include/linux/rcupdate.h:850 (discriminator 1) kernel/sched/fair.c:5164 (discriminator 1))
[ 3.884372][ T1] enqueue_task_fair (kernel/sched/fair.c:401 kernel/sched/fair.c:7092)
[ 3.884372][ T1] enqueue_task (kernel/sched/core.c:2111)
[ 3.884372][ T1] ttwu_do_activate (kernel/sched/core.c:2149 (discriminator 4) kernel/sched/core.c:3667 (discriminator 4))
[ 3.884372][ T1] try_to_wake_up (kernel/sched/sched.h:1883 kernel/sched/sched.h:1969 kernel/sched/core.c:3919 kernel/sched/core.c:4242)
[ 3.884372][ T1] wake_up_process (kernel/sched/core.c:4374)
[ 3.884372][ T1] __kthread_create_on_node (kernel/kthread.c:?)
[ 3.884372][ T1] kthread_create_on_node (kernel/kthread.c:562)
[ 3.884372][ T1] rcu_spawn_tasks_kthread_generic (kernel/rcu/tasks.h:682 (discriminator 512))
[ 3.884372][ T1] rcu_init_tasks_generic (kernel/rcu/tasks.h:1587)
[ 3.884372][ T1] do_one_initcall (init/main.c:1382)
[ 3.884372][ T1] ? stack_depot_save_flags (lib/stackdepot.c:?)
[ 3.884372][ T1] ? tasks_cblist_init_generic (kernel/rcu/tasks.h:1577)
[ 3.884372][ T1] ? kasan_save_track (arch/x86/include/asm/current.h:25 (discriminator 3) mm/kasan/common.c:70 (discriminator 3) mm/kasan/common.c:79 (discriminator 3))
[ 3.884372][ T1] ? kasan_save_track (mm/kasan/common.c:58 mm/kasan/common.c:78)
[ 3.884372][ T1] ? kasan_save_alloc_info (mm/kasan/generic.c:571 (discriminator 1))
[ 3.884372][ T1] ? __kasan_kmalloc (mm/kasan/common.c:419)
[ 3.884372][ T1] ? __kmalloc_noprof (mm/slub.c:5261 mm/slub.c:5272)
[ 3.884372][ T1] ? do_initcalls (init/main.c:1454)
[ 3.884372][ T1] ? do_basic_setup (init/main.c:1480)
[ 3.884372][ T1] ? kernel_init_freeable (init/main.c:1694)
[ 3.884372][ T1] ? kernel_init (init/main.c:1584)
[ 3.884372][ T1] ? ret_from_fork (arch/x86/kernel/process.c:164)
[ 3.884372][ T1] ? ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
[ 3.884372][ T1] ? _raw_spin_unlock_irqrestore (include/linux/spinlock_api_smp.h:179 (discriminator 1) kernel/locking/spinlock.c:194 (discriminator 1))
[ 3.884372][ T1] ? stack_depot_save_flags (lib/stackdepot.c:?)
[ 3.884372][ T1] ? stack_depot_save (lib/stackdepot.c:747)
[ 3.884372][ T1] ? set_track_prepare (mm/slub.c:1036)
[ 3.884372][ T1] ? do_initcalls (init/main.c:1454)
[ 3.884372][ T1] ? do_basic_setup (init/main.c:1480)
[ 3.884372][ T1] ? kernel_init_freeable (init/main.c:1694)
[ 3.884372][ T1] ? kernel_init (init/main.c:1584)
[ 3.884372][ T1] ? ret_from_fork (arch/x86/kernel/process.c:164)
[ 3.884372][ T1] ? ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
[ 3.884372][ T1] ? next_arg (lib/cmdline.c:273)
[ 3.884372][ T1] ? parameq (kernel/params.c:90 (discriminator 1) kernel/params.c:99 (discriminator 1))
[ 3.884372][ T1] ? parse_args (kernel/params.c:153 kernel/params.c:186)
[ 3.884372][ T1] do_initcall_level (init/main.c:1443 (discriminator 6))
[ 3.884372][ T1] do_initcalls (init/main.c:1457 (discriminator 2))
[ 3.884372][ T1] ? kernel_init (init/main.c:1584)
[ 3.884372][ T1] do_basic_setup (init/main.c:1480)
[ 3.884372][ T1] kernel_init_freeable (init/main.c:1694)
[ 3.884372][ T1] ? rest_init (init/main.c:1574)
[ 3.884372][ T1] kernel_init (init/main.c:1584)
[ 3.884372][ T1] ? rest_init (init/main.c:1574)
[ 3.884372][ T1] ret_from_fork (arch/x86/kernel/process.c:164)
[ 3.884372][ T1] ? rest_init (init/main.c:1574)
[ 3.884372][ T1] ret_from_fork_asm (arch/x86/entry/entry_64.S:258)
[ 3.884372][ T1] </TASK>
[ 3.884372][ T1] ---[ end trace ]---
The kernel config and materials to reproduce are available at:
https://download.01.org/0day-ci/archive/20260324/202603242133.f66e336f-lkp@intel.com
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
© 2016 - 2026 Red Hat, Inc.