kernel/sched/ext.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-)
Avoid invoking update_locked_rq() when the runqueue (rq) pointer is NULL
in the SCX_CALL_OP and SCX_CALL_OP_RET macros.
Previously, calling update_locked_rq(NULL) with preemption enabled could
trigger the following warning:
BUG: using __this_cpu_write() in preemptible [00000000]
This happens because __this_cpu_write() is unsafe to use in preemptible
context.
Ensure that update_locked_rq() is only called when rq is non-NULL,
preventing calls to __this_cpu_write() in preemptible context.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Fixes: 18853ba782bef ("sched_ext: Track currently locked rq")
Signed-off-by: Breno Leitao <leitao@debian.org>
---
Changes in v2:
- Avoid calling update_locked_rq() completely in preemptible mode (Peter Zijlstra)
- Link to v1: https://lore.kernel.org/r/20250716-scx_warning-v1-1-0e814f78eb8c@debian.org
---
kernel/sched/ext.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b498d867ba210..7dd5cbcb7a069 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void)
#define SCX_CALL_OP(sch, mask, op, rq, args...) \
do { \
- update_locked_rq(rq); \
+ if (rq) \
+ update_locked_rq(rq); \
if (mask) { \
scx_kf_allow(mask); \
(sch)->ops.op(args); \
@@ -1280,14 +1281,16 @@ do { \
} else { \
(sch)->ops.op(args); \
} \
- update_locked_rq(NULL); \
+ if (rq) \
+ update_locked_rq(NULL); \
} while (0)
#define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \
({ \
__typeof__((sch)->ops.op(args)) __ret; \
\
- update_locked_rq(rq); \
+ if (rq) \
+ update_locked_rq(rq); \
if (mask) { \
scx_kf_allow(mask); \
__ret = (sch)->ops.op(args); \
@@ -1295,7 +1298,8 @@ do { \
} else { \
__ret = (sch)->ops.op(args); \
} \
- update_locked_rq(NULL); \
+ if (rq) \
+ update_locked_rq(NULL); \
__ret; \
})
---
base-commit: 155a3c003e555a7300d156a5252c004c392ec6b0
change-id: 20250716-scx_warning-5143cf17f806
Best regards,
--
Breno Leitao <leitao@debian.org>
Applied to sched_ext/for-6.16-fixes w/ commit message edit suggested by Andrea: ------ 8< ------ From e14fd98c6d66cb76694b12c05768e4f9e8c95664 Mon Sep 17 00:00:00 2001 From: Breno Leitao <leitao@debian.org> Date: Wed, 16 Jul 2025 10:38:48 -0700 Subject: [PATCH] sched/ext: Prevent update_locked_rq() calls with NULL rq Avoid invoking update_locked_rq() when the runqueue (rq) pointer is NULL in the SCX_CALL_OP and SCX_CALL_OP_RET macros. Previously, calling update_locked_rq(NULL) with preemption enabled could trigger the following warning: BUG: using __this_cpu_write() in preemptible [00000000] This happens because __this_cpu_write() is unsafe to use in preemptible context. rq is NULL when an ops invoked from an unlocked context. In such cases, we don't need to store any rq, since the value should already be NULL (unlocked). Ensure that update_locked_rq() is only called when rq is non-NULL, preventing calling __this_cpu_write() on preemptible context. Suggested-by: Peter Zijlstra <peterz@infradead.org> Fixes: 18853ba782bef ("sched_ext: Track currently locked rq") Signed-off-by: Breno Leitao <leitao@debian.org> Acked-by: Andrea Righi <arighi@nvidia.com> Signed-off-by: Tejun Heo <tj@kernel.org> Cc: stable@vger.kernel.org # v6.15 --- kernel/sched/ext.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b498d867ba21..7dd5cbcb7a06 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void) #define SCX_CALL_OP(sch, mask, op, rq, args...) \ do { \ - update_locked_rq(rq); \ + if (rq) \ + update_locked_rq(rq); \ if (mask) { \ scx_kf_allow(mask); \ (sch)->ops.op(args); \ @@ -1280,14 +1281,16 @@ do { \ } else { \ (sch)->ops.op(args); \ } \ - update_locked_rq(NULL); \ + if (rq) \ + update_locked_rq(NULL); \ } while (0) #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) 
\ ({ \ __typeof__((sch)->ops.op(args)) __ret; \ \ - update_locked_rq(rq); \ + if (rq) \ + update_locked_rq(rq); \ if (mask) { \ scx_kf_allow(mask); \ __ret = (sch)->ops.op(args); \ @@ -1295,7 +1298,8 @@ do { \ } else { \ __ret = (sch)->ops.op(args); \ } \ - update_locked_rq(NULL); \ + if (rq) \ + update_locked_rq(NULL); \ __ret; \ }) -- 2.50.1
On Wed, Jul 16, 2025 at 10:38:48AM -0700, Breno Leitao wrote: > Avoid invoking update_locked_rq() when the runqueue (rq) pointer is NULL > in the SCX_CALL_OP and SCX_CALL_OP_RET macros. Maybe it'd be useful to clarify that rq == NULL indicates a callback invoked from an unlocked context. In that case, we don't need to store any rq, since the default is already NULL (unlocked), and we can avoid unnecessary updates. With something like that: Acked-by: Andrea Righi <arighi@nvidia.com> Thanks, -Andrea > > Previously, calling update_locked_rq(NULL) with preemption enabled could > trigger the following warning: > > BUG: using __this_cpu_write() in preemptible [00000000] > > This happens because __this_cpu_write() is unsafe to use in preemptible > context. > > Ensure that update_locked_rq() is only called when rq is non-NULL, > preventing calling __this_cpu_write() on preemptible context. > > Suggested-by: Peter Zijlstra <peterz@infradead.org> > Fixes: 18853ba782bef ("sched_ext: Track currently locked rq") > Signed-off-by: Breno Leitao <leitao@debian.org> > --- > Changes in v2: > - Avoid calling update_locked_rq() completely on preemptible mode (Peter Zijlstra) > - Link to v1: https://lore.kernel.org/r/20250716-scx_warning-v1-1-0e814f78eb8c@debian.org > --- > kernel/sched/ext.c | 12 ++++++++---- > 1 file changed, 8 insertions(+), 4 deletions(-) > > diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c > index b498d867ba210..7dd5cbcb7a069 100644 > --- a/kernel/sched/ext.c > +++ b/kernel/sched/ext.c > @@ -1272,7 +1272,8 @@ static inline struct rq *scx_locked_rq(void) > > #define SCX_CALL_OP(sch, mask, op, rq, args...) 
\ > do { \ > - update_locked_rq(rq); \ > + if (rq) \ > + update_locked_rq(rq); \ > if (mask) { \ > scx_kf_allow(mask); \ > (sch)->ops.op(args); \ > @@ -1280,14 +1281,16 @@ do { \ > } else { \ > (sch)->ops.op(args); \ > } \ > - update_locked_rq(NULL); \ > + if (rq) \ > + update_locked_rq(NULL); \ > } while (0) > > #define SCX_CALL_OP_RET(sch, mask, op, rq, args...) \ > ({ \ > __typeof__((sch)->ops.op(args)) __ret; \ > \ > - update_locked_rq(rq); \ > + if (rq) \ > + update_locked_rq(rq); \ > if (mask) { \ > scx_kf_allow(mask); \ > __ret = (sch)->ops.op(args); \ > @@ -1295,7 +1298,8 @@ do { \ > } else { \ > __ret = (sch)->ops.op(args); \ > } \ > - update_locked_rq(NULL); \ > + if (rq) \ > + update_locked_rq(NULL); \ > __ret; \ > }) > > > --- > base-commit: 155a3c003e555a7300d156a5252c004c392ec6b0 > change-id: 20250716-scx_warning-5143cf17f806 > > Best regards, > -- > Breno Leitao <leitao@debian.org> >
© 2016 - 2025 Red Hat, Inc.