scx_locked_rq() is used both from ext.c and ext_idle.c, so make it
public and declare its prototype in ext.h.
No functional changes.
Signed-off-by: Andrea Righi <arighi@nvidia.com>
---
kernel/sched/ext.c | 2 +-
kernel/sched/ext.h | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3e483138dff60..941603ec67e27 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1265,7 +1265,7 @@ static inline void update_locked_rq(struct rq *rq)
* Return the rq currently locked from an scx callback, or NULL if no rq is
* locked.
*/
-static inline struct rq *scx_locked_rq(void)
+struct rq *scx_locked_rq(void)
{
return __this_cpu_read(locked_rq);
}
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index d30f2d1bc00d5..cda5dfa4dad09 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -18,6 +18,8 @@ static inline bool scx_rq_bypassing(struct rq *rq)
return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
}
+struct rq *scx_locked_rq(void);
+
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
void scx_tick(struct rq *rq);
--
2.49.0
Hi Andrea,
On 6/4/25 23:33, Andrea Righi wrote:
> scx_locked_rq() is used both from ext.c and ext_idle.c, so make it
> public and declare its prototype in ext.h.
scx_rq_bypassing() is the same, but it is defined with "static inline".
Would it be better to define it with "static inline" for consistency? And,
anyway scx_rq_bypassing() is used only within ext*.
Regards,
Changwoo Min
>
> No functional changes.
>
> Signed-off-by: Andrea Righi <arighi@nvidia.com>
> ---
> kernel/sched/ext.c | 2 +-
> kernel/sched/ext.h | 2 ++
> 2 files changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 3e483138dff60..941603ec67e27 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -1265,7 +1265,7 @@ static inline void update_locked_rq(struct rq *rq)
> * Return the rq currently locked from an scx callback, or NULL if no rq is
> * locked.
> */
> -static inline struct rq *scx_locked_rq(void)
> +struct rq *scx_locked_rq(void)
> {
> return __this_cpu_read(locked_rq);
> }
> diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
> index d30f2d1bc00d5..cda5dfa4dad09 100644
> --- a/kernel/sched/ext.h
> +++ b/kernel/sched/ext.h
> @@ -18,6 +18,8 @@ static inline bool scx_rq_bypassing(struct rq *rq)
> return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
> }
>
> +struct rq *scx_locked_rq(void);
> +
> DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
>
> void scx_tick(struct rq *rq);
Hi Changwoo,
On Thu, Jun 05, 2025 at 07:28:36AM +0200, Changwoo Min wrote:
> Hi Andrea,
>
> On 6/4/25 23:33, Andrea Righi wrote:
> > scx_locked_rq() is used both from ext.c and ext_idle.c, so make it
> > public and declare its prototype in ext.h.
>
> scx_rq_bypassing() is the same, but it is defined with "static inline".
> Would it be better to define it with "static inline" for consistency? And,
> anyway scx_rq_bypassing() is used only within ext*.
Yep, I'll make scx_locked_rq() static inline as well, as suggested by Tejun.
About scx_rq_bypassing(), it is currently used both in ext.c and
ext_idle.c, so we need to move that to ext.h.
Thanks,
-Andrea
>
> Regards,
> Changwoo Min
>
> >
> > No functional changes.
> >
> > Signed-off-by: Andrea Righi <arighi@nvidia.com>
> > ---
> > kernel/sched/ext.c | 2 +-
> > kernel/sched/ext.h | 2 ++
> > 2 files changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> > index 3e483138dff60..941603ec67e27 100644
> > --- a/kernel/sched/ext.c
> > +++ b/kernel/sched/ext.c
> > @@ -1265,7 +1265,7 @@ static inline void update_locked_rq(struct rq *rq)
> > * Return the rq currently locked from an scx callback, or NULL if no rq is
> > * locked.
> > */
> > -static inline struct rq *scx_locked_rq(void)
> > +struct rq *scx_locked_rq(void)
> > {
> > return __this_cpu_read(locked_rq);
> > }
> > diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
> > index d30f2d1bc00d5..cda5dfa4dad09 100644
> > --- a/kernel/sched/ext.h
> > +++ b/kernel/sched/ext.h
> > @@ -18,6 +18,8 @@ static inline bool scx_rq_bypassing(struct rq *rq)
> > return unlikely(rq->scx.flags & SCX_RQ_BYPASSING);
> > }
> > +struct rq *scx_locked_rq(void);
> > +
> > DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
> > void scx_tick(struct rq *rq);
>
On Wed, Jun 04, 2025 at 04:33:14PM +0200, Andrea Righi wrote:
> scx_locked_rq() is used both from ext.c and ext_idle.c, so make it
> public and declare its prototype in ext.h.
>
> No functional changes.
>
> Signed-off-by: Andrea Righi <arighi@nvidia.com>
> ---
> kernel/sched/ext.c | 2 +-
> kernel/sched/ext.h | 2 ++
> 2 files changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 3e483138dff60..941603ec67e27 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -1265,7 +1265,7 @@ static inline void update_locked_rq(struct rq *rq)
> * Return the rq currently locked from an scx callback, or NULL if no rq is
> * locked.
> */
> -static inline struct rq *scx_locked_rq(void)
> +struct rq *scx_locked_rq(void)
> {
> return __this_cpu_read(locked_rq);
Can you rename locked_rq to scx_locked_rq_var (or something else), expose it
and then make scx_locked_rq() an inline function in ext.h. Alternatively, I
think it'd be fine to drop the wrapper and let each user do
__this_cpu_read(scx_locked_rq) too.
Thanks.
--
tejun
On Wed, Jun 04, 2025 at 08:48:56AM -1000, Tejun Heo wrote:
> On Wed, Jun 04, 2025 at 04:33:14PM +0200, Andrea Righi wrote:
> > scx_locked_rq() is used both from ext.c and ext_idle.c, so make it
> > public and declare its prototype in ext.h.
> >
> > No functional changes.
> >
> > Signed-off-by: Andrea Righi <arighi@nvidia.com>
> > ---
> > kernel/sched/ext.c | 2 +-
> > kernel/sched/ext.h | 2 ++
> > 2 files changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> > index 3e483138dff60..941603ec67e27 100644
> > --- a/kernel/sched/ext.c
> > +++ b/kernel/sched/ext.c
> > @@ -1265,7 +1265,7 @@ static inline void update_locked_rq(struct rq *rq)
> > * Return the rq currently locked from an scx callback, or NULL if no rq is
> > * locked.
> > */
> > -static inline struct rq *scx_locked_rq(void)
> > +struct rq *scx_locked_rq(void)
> > {
> > return __this_cpu_read(locked_rq);
>
> Can you rename locked_rq to scx_locked_rq_var (or something else), expose it
> and then make scx_locked_rq() an inline function in ext.h. Alternatively, I
> think it'd be fine to drop the wrapper and let each user do
> __this_cpu_read(scx_locked_rq) too.
BTW, if you update this patch, no need to resend 1-3. Just send the updated
patch as a reply to this one.
Thanks.
--
tejun
scx_locked_rq() is used both from ext.c and ext_idle.c, move it to ext.h
as a static inline function.
No functional changes.
v2: Rename locked_rq to scx_locked_rq_state, expose it and make
scx_locked_rq() inline, as suggested by Tejun.
Signed-off-by: Andrea Righi <arighi@nvidia.com>
---
kernel/sched/ext.c | 13 ++-----------
kernel/sched/ext.h | 11 +++++++++++
2 files changed, 13 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3e483138dff60..3623ba98d7d83 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1247,7 +1247,7 @@ static void scx_kf_disallow(u32 mask)
* This allows kfuncs to safely operate on rq from any scx ops callback,
* knowing which rq is already locked.
*/
-static DEFINE_PER_CPU(struct rq *, locked_rq);
+DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
static inline void update_locked_rq(struct rq *rq)
{
@@ -1258,16 +1258,7 @@ static inline void update_locked_rq(struct rq *rq)
*/
if (rq)
lockdep_assert_rq_held(rq);
- __this_cpu_write(locked_rq, rq);
-}
-
-/*
- * Return the rq currently locked from an scx callback, or NULL if no rq is
- * locked.
- */
-static inline struct rq *scx_locked_rq(void)
-{
- return __this_cpu_read(locked_rq);
+ __this_cpu_write(scx_locked_rq_state, rq);
}
#define SCX_CALL_OP(sch, mask, op, rq, args...) \
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index d30f2d1bc00d5..6d6d00e9de20f 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -20,6 +20,17 @@ static inline bool scx_rq_bypassing(struct rq *rq)
DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
+DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
+
+/*
+ * Return the rq currently locked from an scx callback, or NULL if no rq is
+ * locked.
+ */
+static inline struct rq *scx_locked_rq(void)
+{
+ return __this_cpu_read(scx_locked_rq_state);
+}
+
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
--
2.49.0
© 2016 - 2025 Red Hat, Inc.