sched_ext: Move __SCX_DSQ_ITER_ALL_FLAGS BUILD_BUG_ON to the right place

The BUILD_BUG_ON() which checks that __SCX_DSQ_ITER_ALL_FLAGS doesn't
overlap with the private lnode bits was in scx_task_iter_start() which has
nothing to do with DSQ iteration. Move it to bpf_iter_scx_dsq_new() where it
belongs.
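
For context, the assertion guards the bit-space split between the lnode's
private low bits and the iterator flag bits stacked above them. A minimal
standalone sketch of the pattern (made-up EX_* names and bit values for
illustration, not the actual kernel enums):

#include <linux/build_bug.h>

/* Hypothetical layout: bits 0..15 are private to the lnode side, so
 * iterator flags must live strictly above them.
 */
#define EX_LNODE_PRIV_SHIFT	16
#define EX_ITER_REV		(1U << 16)
#define EX_ITER_ALL_FLAGS	EX_ITER_REV

static inline void ex_assert_no_overlap(void)
{
	/* Build fails if any iterator flag dips into the private range. */
	BUILD_BUG_ON(EX_ITER_ALL_FLAGS & ((1U << EX_LNODE_PRIV_SHIFT) - 1));
}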
No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/sched/ext.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -474,9 +474,6 @@ struct scx_task_iter {
*/
static void scx_task_iter_start(struct scx_task_iter *iter)
{
- BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
- ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
-
spin_lock_irq(&scx_tasks_lock);

iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
@@ -6218,6 +6215,8 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(str
sizeof(struct bpf_iter_scx_dsq));
BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
__alignof__(struct bpf_iter_scx_dsq));
+ BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
+ ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));

/*
* next() and destroy() will be called regardless of the return value.
> Tejun Heo (2):
>   sched_ext: Move __SCX_DSQ_ITER_ALL_FLAGS BUILD_BUG_ON to the right place
>   sched_ext: Minor cleanups to scx_task_iter

Applied 1-2 to sched_ext/for-6.19. Thanks.

--
tejun
On Tue, Nov 04, 2025 at 11:40:22AM -1000, Tejun Heo wrote:
> The BUILD_BUG_ON() which checks that __SCX_DSQ_ITER_ALL_FLAGS doesn't
> overlap with the private lnode bits was in scx_task_iter_start() which has
> nothing to do with DSQ iteration. Move it to bpf_iter_scx_dsq_new() where it
> belongs.
>
> No functional changes.
>
> Signed-off-by: Tejun Heo <tj@kernel.org>
Looks good (both patches)
Acked-by: Andrea Righi <arighi@nvidia.com>
Thanks,
-Andrea
> ---
> kernel/sched/ext.c | 5 ++---
> 1 file changed, 2 insertions(+), 3 deletions(-)
>
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -474,9 +474,6 @@ struct scx_task_iter {
> */
> static void scx_task_iter_start(struct scx_task_iter *iter)
> {
> - BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
> - ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
> -
> spin_lock_irq(&scx_tasks_lock);
>
> iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
> @@ -6218,6 +6215,8 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(str
> sizeof(struct bpf_iter_scx_dsq));
> BUILD_BUG_ON(__alignof__(struct bpf_iter_scx_dsq_kern) !=
> __alignof__(struct bpf_iter_scx_dsq));
> + BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
> + ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));
>
> /*
> * next() and destroy() will be called regardless of the return value.
sched_ext: Minor cleanups to scx_task_iter

- Use memset() in scx_task_iter_start() instead of zeroing fields individually.
- In scx_task_iter_next(), move __scx_task_iter_maybe_relock() after the
  batch check, which is simpler (see the sketch after this list).
- Update comment to reflect that tasks are removed from scx_tasks when dead
(commit 7900aa699c34 ("sched_ext: Fix cgroup exit ordering by moving
sched_ext_free() to finish_task_switch()")).
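
Roughly, the resulting batching pattern looks like the following
(simplified sketch with hypothetical EX_* names and a bare spinlock; the
real iterator also manages the cursor node and locked-task state):

#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical iterator; the real scx_task_iter carries more state. */
struct ex_iter {
	unsigned int	cnt;
	bool		list_locked;
};

#define EX_BATCH	32

static DEFINE_SPINLOCK(ex_lock);

static void ex_iter_step(struct ex_iter *iter)
{
	/* Every EX_BATCH steps, drop the lock and yield so a long walk
	 * doesn't hog the CPU with the lock held.
	 */
	if (!(++iter->cnt % EX_BATCH)) {
		spin_unlock_irq(&ex_lock);
		iter->list_locked = false;
		cond_resched();
	}

	/* Single relock point after the batch check: covers both the
	 * drop above and callers that enter unlocked.
	 */
	if (!iter->list_locked) {
		spin_lock_irq(&ex_lock);
		iter->list_locked = true;
	}
}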
No functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/sched/ext.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -470,16 +470,16 @@ struct scx_task_iter {
* RCU read lock or obtaining a reference count.
*
* All tasks which existed when the iteration started are guaranteed to be
- * visited as long as they still exist.
+ * visited as long as they are not dead.
*/
static void scx_task_iter_start(struct scx_task_iter *iter)
{
+ memset(iter, 0, sizeof(*iter));
+
spin_lock_irq(&scx_tasks_lock);

iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
list_add(&iter->cursor.tasks_node, &scx_tasks);
- iter->locked_task = NULL;
- iter->cnt = 0;
iter->list_locked = true;
}

@@ -545,14 +545,13 @@ static struct task_struct *scx_task_iter
struct list_head *cursor = &iter->cursor.tasks_node;
struct sched_ext_entity *pos;

- __scx_task_iter_maybe_relock(iter);
-
if (!(++iter->cnt % SCX_TASK_ITER_BATCH)) {
scx_task_iter_unlock(iter);
cond_resched();
- __scx_task_iter_maybe_relock(iter);
}

+ __scx_task_iter_maybe_relock(iter);
+
list_for_each_entry(pos, cursor, tasks_node) {
if (&pos->tasks_node == &scx_tasks)
return NULL;