Provide scx_bpf_remote_curr() as a way for scx schedulers to check the curr
task of a remote rq without assuming its lock is held.
Many scx schedulers make use of scx_bpf_cpu_rq() to check a remote curr
(e.g. to see if it should be preempted). This is problematic because
scx_bpf_cpu_rq() provides access to all fields of struct rq, most of
which aren't safe to use without holding the associated rq lock.
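For example (an illustrative sketch only, not part of this patch: the helper
name and the slice-exhaustion policy are made up), a scheduler could use the
new kfunc to decide whether to kick a CPU:

  static void maybe_preempt_cpu(s32 cpu)
  {
  	struct task_struct *curr;
  	bool preempt = false;

  	bpf_rcu_read_lock();
  	curr = scx_bpf_remote_curr(cpu);	/* NULL if @cpu is invalid */
  	/* one possible policy: preempt once curr has exhausted its slice */
  	if (curr && curr->scx.slice == 0)
  		preempt = true;
  	bpf_rcu_read_unlock();

  	if (preempt)
  		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
  }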
Signed-off-by: Christian Loehle <christian.loehle@arm.com>
---
kernel/sched/ext.c | 14 ++++++++++++++
tools/sched_ext/include/scx/common.bpf.h | 1 +
2 files changed, 15 insertions(+)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 9fcc310d85d5..e242a2520f06 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -7452,6 +7452,19 @@ __bpf_kfunc struct rq *scx_bpf_cpu_rq_locked(void)
return rq;
}
+/**
+ * scx_bpf_remote_curr - Return remote CPU's curr task
+ * @cpu: CPU of interest
+ *
+ * Callers must hold RCU read lock (KF_RCU).
+ */
+__bpf_kfunc struct task_struct *scx_bpf_remote_curr(s32 cpu)
+{
+ if (!kf_cpu_valid(cpu, NULL))
+ return NULL;
+ return READ_ONCE(cpu_rq(cpu)->curr);
+}
+
/**
* scx_bpf_task_cgroup - Return the sched cgroup of a task
* @p: task of interest
@@ -7617,6 +7630,7 @@ BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
BTF_ID_FLAGS(func, scx_bpf_cpu_rq_locked, KF_RET_NULL)
+BTF_ID_FLAGS(func, scx_bpf_remote_curr, KF_RET_NULL | KF_RCU)
#ifdef CONFIG_CGROUP_SCHED
BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
#endif
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index f5be06c93359..dd3d94256c10 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -92,6 +92,7 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct rq *scx_bpf_cpu_rq_locked(void) __ksym;
+struct task_struct *scx_bpf_remote_curr(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
--
2.34.1
Hello,

On Mon, Sep 01, 2025 at 02:26:03PM +0100, Christian Loehle wrote:
> +/**
> + * scx_bpf_remote_curr - Return remote CPU's curr task
> + * @cpu: CPU of interest
> + *
> + * Callers must hold RCU read lock (KF_RCU).
> + */
> +__bpf_kfunc struct task_struct *scx_bpf_remote_curr(s32 cpu)

And name this scx_bpf_cpu_curr().

Thanks.

--
tejun
Hi Christian,
On Mon, Sep 01, 2025 at 02:26:03PM +0100, Christian Loehle wrote:
> Provide scx_bpf_remote_curr() as a way for scx schedulers to check the curr
> task of a remote rq without assuming its lock is held.
>
> Many scx schedulers make use of scx_bpf_cpu_rq() to check a remote curr
> (e.g. to see if it should be preempted). This is problematic because
> scx_bpf_cpu_rq() provides access to all fields of struct rq, most of
> which aren't safe to use without holding the associated rq lock.
>
> Signed-off-by: Christian Loehle <christian.loehle@arm.com>
> ---
> kernel/sched/ext.c | 14 ++++++++++++++
> tools/sched_ext/include/scx/common.bpf.h | 1 +
> 2 files changed, 15 insertions(+)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 9fcc310d85d5..e242a2520f06 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -7452,6 +7452,19 @@ __bpf_kfunc struct rq *scx_bpf_cpu_rq_locked(void)
> return rq;
> }
>
> +/**
> + * scx_bpf_remote_curr - Return remote CPU's curr task
> + * @cpu: CPU of interest
> + *
> + * Callers must hold RCU read lock (KF_RCU).
> + */
> +__bpf_kfunc struct task_struct *scx_bpf_remote_curr(s32 cpu)
> +{
> + if (!kf_cpu_valid(cpu, NULL))
> + return NULL;
> + return READ_ONCE(cpu_rq(cpu)->curr);
> Shouldn't it be rcu_dereference(cpu_rq(cpu)->curr)?
Thanks,
-Andrea
> +}
> +
> /**
> * scx_bpf_task_cgroup - Return the sched cgroup of a task
> * @p: task of interest
> @@ -7617,6 +7630,7 @@ BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
> BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
> BTF_ID_FLAGS(func, scx_bpf_cpu_rq)
> BTF_ID_FLAGS(func, scx_bpf_cpu_rq_locked, KF_RET_NULL)
> +BTF_ID_FLAGS(func, scx_bpf_remote_curr, KF_RET_NULL | KF_RCU)
> #ifdef CONFIG_CGROUP_SCHED
> BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE)
> #endif
> diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
> index f5be06c93359..dd3d94256c10 100644
> --- a/tools/sched_ext/include/scx/common.bpf.h
> +++ b/tools/sched_ext/include/scx/common.bpf.h
> @@ -92,6 +92,7 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym;
> s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
> struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
> struct rq *scx_bpf_cpu_rq_locked(void) __ksym;
> +struct task_struct *scx_bpf_remote_curr(s32 cpu) __ksym;
> struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
> u64 scx_bpf_now(void) __ksym __weak;
> void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
> --
> 2.34.1
>
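The variant being asked about would be a one-line change in the kfunc body
(sketch only, not a confirmed fix):

  __bpf_kfunc struct task_struct *scx_bpf_remote_curr(s32 cpu)
  {
  	if (!kf_cpu_valid(cpu, NULL))
  		return NULL;
  	/* rq->curr is an __rcu pointer, so make the dereference explicit */
  	return rcu_dereference(cpu_rq(cpu)->curr);
  }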