kernel/sched/ext.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
enqueue_task_scx() takes int enq_flags from the sched_class interface.
SCX enqueue flags starting at bit 32 (SCX_ENQ_PREEMPT and above) are
silently truncated when passed through activate_task(). extra_enq_flags
was added as a workaround - storing high bits in rq->scx.extra_enq_flags
and OR-ing them back in enqueue_task_scx(). However, the OR target is
still the int parameter, so the high bits are lost anyway.
The current impact is limited as the only affected flag is SCX_ENQ_PREEMPT
which is informational to the BPF scheduler - its loss means the scheduler
doesn't know about preemption but doesn't cause incorrect behavior.
Fix by renaming the int parameter to core_enq_flags and introducing a
u64 enq_flags local that merges both sources. All downstream functions
already take u64 enq_flags.
Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
Cc: stable@vger.kernel.org # v6.12+
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/sched/ext.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index XXXXXXX..XXXXXXX 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1728,16 +1728,15 @@ static void clr_task_runnable(struct task_struct *p)
p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
}
-static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
+static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
{
struct scx_sched *sch = scx_task_sched(p);
int sticky_cpu = p->scx.sticky_cpu;
+ u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
if (enq_flags & ENQUEUE_WAKEUP)
rq->scx.flags |= SCX_RQ_IN_WAKEUP;
- enq_flags |= rq->scx.extra_enq_flags;
-
/*
* Restoring a running task will be immediately followed by
* set_next_task_scx() which expects the task to not be on the BPF
--
tejun
Applied to sched_ext/for-7.0-fixes. Thanks. -- tejun
On Fri, Mar 06, 2026 at 02:23:13PM -1000, Tejun Heo wrote:
> enqueue_task_scx() takes int enq_flags from the sched_class interface.
> SCX enqueue flags starting at bit 32 (SCX_ENQ_PREEMPT and above) are
> silently truncated when passed through activate_task(). extra_enq_flags
> was added as a workaround - storing high bits in rq->scx.extra_enq_flags
> and OR-ing them back in enqueue_task_scx(). However, the OR target is
> still the int parameter, so the high bits are lost anyway.
>
> The current impact is limited as the only affected flag is SCX_ENQ_PREEMPT
> which is informational to the BPF scheduler - its loss means the scheduler
> doesn't know about preemption but doesn't cause incorrect behavior.
>
> Fix by renaming the int parameter to core_enq_flags and introducing a
> u64 enq_flags local that merges both sources. All downstream functions
> already take u64 enq_flags.
>
> Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
> Cc: stable@vger.kernel.org # v6.12+
> Signed-off-by: Tejun Heo <tj@kernel.org>
Good catch.
Acked-by: Andrea Righi <arighi@nvidia.com>
Thanks,
-Andrea
> ---
> kernel/sched/ext.c | 5 ++---
> 1 file changed, 2 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index XXXXXXX..XXXXXXX 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -1728,16 +1728,15 @@ static void clr_task_runnable(struct task_struct *p)
> p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
> }
>
> -static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
> +static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
> {
> struct scx_sched *sch = scx_task_sched(p);
> int sticky_cpu = p->scx.sticky_cpu;
> + u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
>
> if (enq_flags & ENQUEUE_WAKEUP)
> rq->scx.flags |= SCX_RQ_IN_WAKEUP;
>
> - enq_flags |= rq->scx.extra_enq_flags;
> -
> /*
> * Restoring a running task will be immediately followed by
> * set_next_task_scx() which expects the task to not be on the BPF
> --
> tejun
© 2016 - 2026 Red Hat, Inc.