On 2024/5/28 08:35, Ankur Arora wrote:
> The default preemption policy for voluntary preemption under
> PREEMPT_AUTO is to schedule eagerly for tasks of a higher scheduling
> class, and lazily for well-behaved, non-idle tasks.
>
> This is the same policy as preempt=none, but with eager handling of
> higher-priority scheduling classes.
>
> Comparing a cyclictest workload against a background kernel load of
> 'stress-ng --mmap' shows that both the average and the maximum
> latencies improve:
>
> # stress-ng --mmap 0 &
> # cyclictest --mlockall --smp --priority=80 --interval=200 --distance=0 -q -D 300
>
>                                        Min ( %stdev )       Act ( %stdev )         Avg ( %stdev )       Max ( %stdev )
>
>  PREEMPT_AUTO, preempt=voluntary       1.73 ( +- 25.43% )    62.16 ( +- 303.39% )  14.92 ( +- 17.96% )  2778.22 ( +- 15.04% )
>  PREEMPT_DYNAMIC, preempt=voluntary    1.83 ( +- 20.76% )   253.45 ( +- 233.21% )  18.70 ( +- 15.88% )  2992.45 ( +- 15.95% )
>
> The table above shows the aggregated latencies across all CPUs.
>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Juri Lelli <juri.lelli@redhat.com>
> Cc: Vincent Guittot <vincent.guittot@linaro.org>
> Originally-by: Thomas Gleixner <tglx@linutronix.de>
> Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
> Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
> ---
> kernel/sched/core.c | 12 ++++++++----
> kernel/sched/sched.h | 6 ++++++
> 2 files changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index c25cccc09b65..2bc3ae21a9d0 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1052,6 +1052,9 @@ static resched_t resched_opt_translate(struct task_struct *curr,
>  	if (preempt_model_preemptible())
>  		return RESCHED_NOW;
>  
> +	if (preempt_model_voluntary() && opt == RESCHED_PRIORITY)
> +		return RESCHED_NOW;
> +
>  	if (is_idle_task(curr))
>  		return RESCHED_NOW;
>  
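Just to check my reading of this hunk: the full translation would now be
roughly the below, where the RESCHED_LAZY fallthrough for well-behaved
tasks is my assumption from the commit message (the tail of the function
is not in the quoted context):

static resched_t resched_opt_translate(struct task_struct *curr,
				       enum resched_opt opt)
{
	/* preempt=full: always reschedule eagerly */
	if (preempt_model_preemptible())
		return RESCHED_NOW;

	/* preempt=voluntary: eager for higher scheduling classes */
	if (preempt_model_voluntary() && opt == RESCHED_PRIORITY)
		return RESCHED_NOW;

	/* never keep the idle task running */
	if (is_idle_task(curr))
		return RESCHED_NOW;

	/* well-behaved, non-idle tasks reschedule lazily */
	return RESCHED_LAZY;
}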
> @@ -2289,7 +2292,7 @@ void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
>  	if (p->sched_class == rq->curr->sched_class)
>  		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
>  	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
> -		resched_curr(rq);
> +		resched_curr_priority(rq);
> 
Besides the higher-scheduling-class condition, could we also use
resched_curr_priority() within the same class? For example, in the fair
class we could use it when a SCHED_NORMAL task wakes up while a
SCHED_IDLE task is running. Maybe something like:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 41b58387023d..eedb70234bdd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8352,6 +8352,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int cse_is_idle, pse_is_idle;
+	enum resched_opt opt = RESCHED_PRIORITY;
 
 	if (unlikely(se == pse))
 		return;
@@ -8385,7 +8386,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	/* Idle tasks are by definition preempted by non-idle tasks. */
 	if (unlikely(task_has_idle_policy(curr)) &&
 	    likely(!task_has_idle_policy(p)))
-		goto preempt;
+		goto preempt; /* RESCHED_PRIORITY */
 
 	/*
 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
@@ -8405,7 +8406,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	 * in the inverse case).
 	 */
 	if (cse_is_idle && !pse_is_idle)
-		goto preempt;
+		goto preempt; /* RESCHED_PRIORITY */
 
 	if (cse_is_idle != pse_is_idle)
 		return;
@@ -8415,13 +8416,15 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	/*
 	 * XXX pick_eevdf(cfs_rq) != se ?
 	 */
-	if (pick_eevdf(cfs_rq) == pse)
+	if (pick_eevdf(cfs_rq) == pse) {
+		opt = RESCHED_DEFAULT;
 		goto preempt;
+	}
 
 	return;
 
 preempt:
-	resched_curr(rq);
+	__resched_curr(rq, opt);
 }
 
 #ifdef CONFIG_SMP
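
(Here I'm assuming resched_curr() and resched_curr_priority() remain
thin wrappers around the same helper, along the lines of:

static inline void resched_curr(struct rq *rq)
{
	__resched_curr(rq, RESCHED_DEFAULT);
}

static inline void resched_curr_priority(struct rq *rq)
{
	__resched_curr(rq, RESCHED_PRIORITY);
}

With that, under preempt=voluntary the SCHED_IDLE vs SCHED_NORMAL cases
above would translate to RESCHED_NOW through the RESCHED_PRIORITY path,
while the plain pick_eevdf() eligibility preemption stays lazy.)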