kernel/sched/ext.c | 6 +++--- tools/sched_ext/scx_qmap.bpf.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-)
From: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
1. replace hardcoding with SCX_KF_UNLOCKED.
2. scx_next_task_picked() has been replaced with switch_class().
3. minor typo fixes.
Signed-off-by: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
---
kernel/sched/ext.c | 6 +++---
tools/sched_ext/scx_qmap.bpf.c | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 1b1c33f12dd7..832f77d1d318 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2759,10 +2759,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
* If the previous sched_class for the current CPU was not SCX,
* notify the BPF scheduler that it again has control of the
* core. This callback complements ->cpu_release(), which is
- * emitted in scx_next_task_picked().
+ * emitted in switch_class().
*/
if (SCX_HAS_OP(cpu_acquire))
- SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
+ SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq), NULL);
rq->scx.cpu_released = false;
}
@@ -6096,7 +6096,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
if (cpu != cpu_of(this_rq)) {
/*
* Pairs with smp_store_release() issued by this CPU in
- * scx_next_task_picked() on the resched path.
+ * switch_class() on the resched path.
*
* We busy-wait here to guarantee that no other task can
* be scheduled on our core before the target CPU has
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index ee264947e0c3..f230641929ec 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -5,7 +5,7 @@
* There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets
* assigned to one depending on its compound weight. Each CPU round robins
* through the FIFOs and dispatches more from FIFOs with higher indices - 1 from
- * queue0, 2 from queue1, 4 from queue2 and so on.
+ * queue0, 2 from queue1, 3 from queue2 and so on.
*
* This scheduler demonstrates:
*
--
2.43.0
Hello,
On Wed, Nov 13, 2024 at 10:59:08AM +0800, Zhao Mengmeng wrote:
> From: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
>
> 1. replace hardcoding with SCX_KF_UNLOCKED.
> 2. scx_next_task_picked() has been replaced with siwtch_class().
> 3. minor typo fixes.
>
> Signed-off-by: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
> ---
> kernel/sched/ext.c | 6 +++---
> tools/sched_ext/scx_qmap.bpf.c | 2 +-
> 2 files changed, 4 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 1b1c33f12dd7..832f77d1d318 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -2759,10 +2759,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
> * If the previous sched_class for the current CPU was not SCX,
> * notify the BPF scheduler that it again has control of the
> * core. This callback complements ->cpu_release(), which is
> - * emitted in scx_next_task_picked().
> + * emitted in switch_class().
> */
> if (SCX_HAS_OP(cpu_acquire))
> - SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
> + SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq), NULL);
I think this is actually a bug. David, shouldn't this be SCX_KF_REST?
> rq->scx.cpu_released = false;
> }
>
> @@ -6096,7 +6096,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
> if (cpu != cpu_of(this_rq)) {
> /*
> * Pairs with smp_store_release() issued by this CPU in
> - * scx_next_task_picked() on the resched path.
> + * switch_class() on the resched path.
> *
> * We busy-wait here to guarantee that no other task can
> * be scheduled on our core before the target CPU has
> diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
> index ee264947e0c3..f230641929ec 100644
> --- a/tools/sched_ext/scx_qmap.bpf.c
> +++ b/tools/sched_ext/scx_qmap.bpf.c
> @@ -5,7 +5,7 @@
> * There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets
> * assigned to one depending on its compound weight. Each CPU round robins
> * through the FIFOs and dispatches more from FIFOs with higher indices - 1 from
> - * queue0, 2 from queue1, 4 from queue2 and so on.
> + * queue0, 2 from queue1, 3 from queue2 and so on.
The number to dispatch is determined by:
cpuc->dsp_cnt = 1 << cpuc->dsp_idx;
I think the existing comment is correct.
Thanks.
--
tejun
On 2024/11/15 01:12, Tejun Heo wrote:
> Hello,
>
> On Wed, Nov 13, 2024 at 10:59:08AM +0800, Zhao Mengmeng wrote:
>> From: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
>>
>> 1. replace hardcoding with SCX_KF_UNLOCKED.
>> 2. scx_next_task_picked() has been replaced with siwtch_class().
>> 3. minor typo fixes.
>>
>> Signed-off-by: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
>> ---
>> kernel/sched/ext.c | 6 +++---
>> tools/sched_ext/scx_qmap.bpf.c | 2 +-
>> 2 files changed, 4 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
>> index 1b1c33f12dd7..832f77d1d318 100644
>> --- a/kernel/sched/ext.c
>> +++ b/kernel/sched/ext.c
>> @@ -2759,10 +2759,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev)
>> * If the previous sched_class for the current CPU was not SCX,
>> * notify the BPF scheduler that it again has control of the
>> * core. This callback complements ->cpu_release(), which is
>> - * emitted in scx_next_task_picked().
>> + * emitted in switch_class().
>> */
>> if (SCX_HAS_OP(cpu_acquire))
>> - SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL);
>> + SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq), NULL);
>
> I think this is actually a bug. David, shouldn't this be SCX_KF_REST?
>
>> rq->scx.cpu_released = false;
>> }
Got it.
>> @@ -6096,7 +6096,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
>> if (cpu != cpu_of(this_rq)) {
>> /*
>> * Pairs with smp_store_release() issued by this CPU in
>> - * scx_next_task_picked() on the resched path.
>> + * switch_class() on the resched path.
>> *
>> * We busy-wait here to guarantee that no other task can
>> * be scheduled on our core before the target CPU has
>> diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
>> index ee264947e0c3..f230641929ec 100644
>> --- a/tools/sched_ext/scx_qmap.bpf.c
>> +++ b/tools/sched_ext/scx_qmap.bpf.c
>> @@ -5,7 +5,7 @@
>> * There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets
>> * assigned to one depending on its compound weight. Each CPU round robins
>> * through the FIFOs and dispatches more from FIFOs with higher indices - 1 from
>> - * queue0, 2 from queue1, 4 from queue2 and so on.
>> + * queue0, 2 from queue1, 3 from queue2 and so on.
>
> The number to dispatch is determined by:
>
> cpuc->dsp_cnt = 1 << cpuc->dsp_idx;
>
> I think the existing comment is correct.
You are right, I missed something. Will send a V3
On Thu, Nov 14, 2024 at 07:12:31AM -1000, Tejun Heo wrote: > Hello, > > On Wed, Nov 13, 2024 at 10:59:08AM +0800, Zhao Mengmeng wrote: > > From: Zhao Mengmeng <zhaomengmeng@kylinos.cn> > > > > 1. replace hardcoding with SCX_KF_UNLOCKED. > > 2. scx_next_task_picked() has been replaced with siwtch_class(). > > 3. minor typo fixes. > > > > Signed-off-by: Zhao Mengmeng <zhaomengmeng@kylinos.cn> > > --- > > kernel/sched/ext.c | 6 +++--- > > tools/sched_ext/scx_qmap.bpf.c | 2 +- > > 2 files changed, 4 insertions(+), 4 deletions(-) > > > > diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c > > index 1b1c33f12dd7..832f77d1d318 100644 > > --- a/kernel/sched/ext.c > > +++ b/kernel/sched/ext.c > > @@ -2759,10 +2759,10 @@ static int balance_one(struct rq *rq, struct task_struct *prev) > > * If the previous sched_class for the current CPU was not SCX, > > * notify the BPF scheduler that it again has control of the > > * core. This callback complements ->cpu_release(), which is > > - * emitted in scx_next_task_picked(). > > + * emitted in switch_class(). > > */ > > if (SCX_HAS_OP(cpu_acquire)) > > - SCX_CALL_OP(0, cpu_acquire, cpu_of(rq), NULL); > > + SCX_CALL_OP(SCX_KF_UNLOCKED, cpu_acquire, cpu_of(rq), NULL); > > I think this is actually a bug. David, shouldn't this be SCX_KF_REST? Yes, good catch. We're holding the rq lock so SCX_KF_UNLOCKED isn't safe. I agree this should be SCX_KF_REST. Thanks, David
© 2016 - 2026 Red Hat, Inc.