EAS relies on wakeup events to efficiently place tasks on the system, but
there are cases where a task no longer gets wakeup events, or gets them at
far too low a pace. For such situations, we can take advantage of the task
being put back in the enqueued list to check whether it should be migrated
to another CPU.

Wakeup events remain the main way to migrate tasks, but we now also detect
situations where a task is stuck on a CPU by checking that its utilization
is larger than the maximum available compute capacity (max CPU capacity or
uclamp max setting).
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/fair.c | 206 +++++++++++++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 2 +
2 files changed, 208 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cd046e8216a9..2affc063da55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7088,6 +7088,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p);
static void set_next_buddy(struct sched_entity *se);
/*
@@ -7118,6 +7119,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
h_nr_idle = task_has_idle_policy(p);
if (task_sleep || task_delayed || !se->sched_delayed)
h_nr_runnable = 1;
+
+ if (task_sleep || task_on_rq_migrating(p))
+ dequeue_pushable_task(rq, p);
} else {
cfs_rq = group_cfs_rq(se);
slice = cfs_rq_min_slice(cfs_rq);
@@ -8617,6 +8621,182 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
return target;
}
+static inline bool task_misfit_cpu(struct task_struct *p, int cpu)
+{
+ unsigned long max_capa = get_actual_cpu_capacity(cpu);
+ unsigned long util = task_util_est(p);
+
+ max_capa = min(max_capa, uclamp_eff_value(p, UCLAMP_MAX));
+ util = max(util, task_runnable(p));
+
+ /*
+ * Return true only if the task might not sleep/wakeup because of a low
+ * compute capacity. Tasks, which wake up regularly, will be handled by
+ * feec().
+ */
+ return (util > max_capa);
+}
+
+static int active_load_balance_cpu_stop(void *data);
+
+static inline void migrate_misfit_task(struct task_struct *p, struct rq *rq)
+{
+ int new_cpu, cpu = cpu_of(rq);
+
+ if (!sched_energy_enabled() || is_rd_overutilized(rq->rd))
+ return;
+
+ if (WARN_ON(!p))
+ return;
+
+ if (WARN_ON(p != rq->curr))
+ return;
+
+ if (is_migration_disabled(p))
+ return;
+
+ if ((rq->nr_running > 1) || (p->nr_cpus_allowed == 1))
+ return;
+
+ if (!task_misfit_cpu(p, cpu))
+ return;
+
+ new_cpu = find_energy_efficient_cpu(p, cpu);
+
+ if (new_cpu == cpu)
+ return;
+
+ /*
+ * ->active_balance synchronizes accesses to
+ * ->active_balance_work. Once set, it's cleared
+ * only after active load balance is finished.
+ */
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ } else
+ return;
+
+ raw_spin_rq_unlock(rq);
+ stop_one_cpu_nowait(cpu,
+ active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+ raw_spin_rq_lock(rq);
+}
+
+static inline int has_pushable_tasks(struct rq *rq)
+{
+ return !plist_head_empty(&rq->cfs.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_fair_task(struct rq *rq)
+{
+ struct task_struct *p;
+
+ if (!has_pushable_tasks(rq))
+ return NULL;
+
+ p = plist_first_entry(&rq->cfs.pushable_tasks,
+ struct task_struct, pushable_tasks);
+
+ WARN_ON_ONCE(rq->cpu != task_cpu(p));
+ WARN_ON_ONCE(task_current(rq, p));
+ WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
+ WARN_ON_ONCE(!task_on_rq_queued(p));
+
+ /*
+ * Remove the task from the pushable list as we only try once after
+ * the task has been put back in the enqueued list.
+ */
+ plist_del(&p->pushable_tasks, &rq->cfs.pushable_tasks);
+
+ return p;
+}
+
+/*
+ * See if the non-running fair tasks on this rq can be sent to other CPUs
+ * that fit their profile better.
+ */
+static bool push_fair_task(struct rq *rq)
+{
+ struct task_struct *next_task;
+ int prev_cpu, new_cpu;
+ struct rq *new_rq;
+
+ next_task = pick_next_pushable_fair_task(rq);
+ if (!next_task)
+ return false;
+
+ if (is_migration_disabled(next_task))
+ return true;
+
+ /* We might release rq lock */
+ get_task_struct(next_task);
+
+ prev_cpu = rq->cpu;
+
+ new_cpu = find_energy_efficient_cpu(next_task, prev_cpu);
+
+ if (new_cpu == prev_cpu)
+ goto out;
+
+ new_rq = cpu_rq(new_cpu);
+
+ if (double_lock_balance(rq, new_rq)) {
+ /* The task has already migrated in between */
+ if (task_cpu(next_task) != rq->cpu) {
+ double_unlock_balance(rq, new_rq);
+ goto out;
+ }
+
+ deactivate_task(rq, next_task, 0);
+ set_task_cpu(next_task, new_cpu);
+ activate_task(new_rq, next_task, 0);
+
+ resched_curr(new_rq);
+
+ double_unlock_balance(rq, new_rq);
+ }
+
+out:
+ put_task_struct(next_task);
+
+ return true;
+}
+
+static void push_fair_tasks(struct rq *rq)
+{
+ /* push_fair_task() will return true if it moved a fair task */
+ while (push_fair_task(rq))
+ ;
+}
+
+static DEFINE_PER_CPU(struct balance_callback, fair_push_head);
+
+static inline void fair_queue_push_tasks(struct rq *rq)
+{
+ if (!sched_energy_enabled() || !has_pushable_tasks(rq))
+ return;
+
+ queue_balance_callback(rq, &per_cpu(fair_push_head, rq->cpu), push_fair_tasks);
+}
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+ if (sched_energy_enabled())
+ plist_del(&p->pushable_tasks, &rq->cfs.pushable_tasks);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+ if (sched_energy_enabled() && task_on_rq_queued(p) && !p->se.sched_delayed) {
+ if (!is_rd_overutilized(rq->rd) && task_misfit_cpu(p, rq->cpu)) {
+ plist_del(&p->pushable_tasks, &rq->cfs.pushable_tasks);
+ plist_node_init(&p->pushable_tasks, p->prio);
+ plist_add(&p->pushable_tasks, &rq->cfs.pushable_tasks);
+ }
+ }
+}
+
/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
@@ -8786,6 +8966,10 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
return sched_balance_newidle(rq, rf) != 0;
}
#else
+static inline void migrate_misfit_task(struct task_struct *p, struct rq *rq) {}
+static inline void fair_queue_push_tasks(struct rq *rq) {}
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) {}
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) {}
static inline void set_task_max_allowed_capacity(struct task_struct *p) {}
#endif /* CONFIG_SMP */
@@ -8968,6 +9152,12 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
put_prev_entity(cfs_rq, pse);
set_next_entity(cfs_rq, se);
+ /*
+ * The previous task might be eligible to be pushed to another
+ * CPU if it is still runnable.
+ */
+ enqueue_pushable_task(rq, prev);
+
__set_next_task_fair(rq, p, true);
}
@@ -9040,6 +9230,13 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct t
cfs_rq = cfs_rq_of(se);
put_prev_entity(cfs_rq, se);
}
+
+ /*
+ * The previous task might be eligible to be pushed to another
+ * CPU if it is still active.
+ */
+ enqueue_pushable_task(rq, prev);
+
}
/*
@@ -13102,6 +13299,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (static_branch_unlikely(&sched_numa_balancing))
task_tick_numa(rq, curr);
+ migrate_misfit_task(curr, rq);
update_misfit_status(curr, rq);
check_update_overutilized_status(task_rq(curr));
@@ -13254,6 +13452,8 @@ static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool firs
{
struct sched_entity *se = &p->se;
+ dequeue_pushable_task(rq, p);
+
#ifdef CONFIG_SMP
if (task_on_rq_queued(p)) {
/*
@@ -13271,6 +13471,11 @@ static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool firs
if (hrtick_enabled_fair(rq))
hrtick_start_fair(rq, p);
+ /*
+ * Try to push the prev task before checking misfit for the next task,
+ * as migrating prev can make next fit the CPU.
+ */
+ fair_queue_push_tasks(rq);
update_misfit_status(p, rq);
sched_fair_update_stop_tick(rq, p);
}
@@ -13301,6 +13506,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->tasks_timeline = RB_ROOT_CACHED;
cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifdef CONFIG_SMP
+ plist_head_init(&cfs_rq->pushable_tasks);
raw_spin_lock_init(&cfs_rq->removed.lock);
#endif
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aef716c41edb..c9875cd4c986 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -717,6 +717,8 @@ struct cfs_rq {
struct list_head leaf_cfs_rq_list;
struct task_group *tg; /* group that "owns" this runqueue */
+ struct plist_head pushable_tasks;
+
/* Locally cached copy of our task_group's idle value */
int idle;
--
2.43.0
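For orientation before the review below: the patch wires up two complementary
paths, and the call flow sketched here is reconstructed from the hunks above
(it is only a rough summary, not a quote of the code, and it elides locking
and the !CONFIG_SMP stubs):

/*
 * Path 1: the task runs alone on its CPU and is never dequeued.
 *   task_tick_fair()
 *     -> migrate_misfit_task(curr, rq)       // util above capped capacity?
 *          -> find_energy_efficient_cpu()
 *          -> stop_one_cpu_nowait(..., active_load_balance_cpu_stop, ...)
 *
 * Path 2: the task shares the CPU and cycles through pick/put.
 *   pick_next_task_fair() / put_prev_task_fair()
 *     -> enqueue_pushable_task(rq, prev)     // add prev to cfs.pushable_tasks
 *   __set_next_task_fair()
 *     -> dequeue_pushable_task(rq, p)        // next task is no longer pushable
 *     -> fair_queue_push_tasks(rq)           // queue the balance callback
 *          -> push_fair_tasks() -> push_fair_task()
 *               -> find_energy_efficient_cpu() + double_lock_balance()
 */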
Hello Vincent,
On 12/17/24 17:07, Vincent Guittot wrote:
> EAS relies on wakeup events to efficiently place tasks on the system, but
> there are cases where a task no longer gets wakeup events, or gets them at
> far too low a pace. For such situations, we can take advantage of the task
> being put back in the enqueued list to check whether it should be migrated
> to another CPU.
>
> Wakeup events remain the main way to migrate tasks, but we now also detect
> situations where a task is stuck on a CPU by checking that its utilization
> is larger than the maximum available compute capacity (max CPU capacity or
> uclamp max setting).
It seems there are 2 distinct cases:
a- The task is alone on a rq
b- The task shares the rq and is enqueued/dequeued
a. doesn't seem to need any of the push functions, and b. doesn't seem to
need any of the misfit functions. Maybe it's worth splitting the patch in 2.
>
> [...]
> +static inline bool task_misfit_cpu(struct task_struct *p, int cpu)
> +{
> + unsigned long max_capa = get_actual_cpu_capacity(cpu);
> + unsigned long util = task_util_est(p);
> +
> + max_capa = min(max_capa, uclamp_eff_value(p, UCLAMP_MAX));
> + util = max(util, task_runnable(p));
> +
> + /*
> + * Return true only if the task might not sleep/wakeup because of a low
> + * compute capacity. Tasks, which wake up regularly, will be handled by
> + * feec().
> + */
NIT:
On a little CPU with min_OPP=256 and max_OPP=512,
a task with a util=100 and U_Max=10 will trigger this condition.
However:
- the task is already well placed from a power PoV
- the task has opportunities to sleep/wake up
Shouldn't we ideally take:
unsigned long max_capa;
max_capa = max(min_capa(cpu), uclamp_eff_value(p, UCLAMP_MAX));
max_capa = min(get_actual_cpu_capacity(cpu), max_capa);
with min_capa(cpu) returning 256 in this case, i.e. the CPU capacity at the
lowest OPP ?
> + return (util > max_capa);
> +}
> +
[...]
On Thu, 16 Jan 2025 at 18:34, Pierre Gondois <pierre.gondois@arm.com> wrote:
>
> Hello Vincent,
>
> On 12/17/24 17:07, Vincent Guittot wrote:
> > [...]
>
> It seems there are 2 distinct cases:
> a- The task is alone on a rq
> b- The task shares the rq and is enqueued/dequeued
Do you mean pick/set and put instead of enqueue/dequeue? Those are the
events used for the push callback.
The enqueue/dequeue_pushable_task names are maybe a bit misleading:
they mean tasks are added to/removed from the pushable list, not
enqueued/dequeued from the rq. I should probably rename them to
add/remove_pushable_task to avoid confusion.
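A minimal sketch of what that rename could look like, mirroring the helpers
in the diff above (the names are only the proposed ones, not part of the
posted patch):

static void add_pushable_task(struct rq *rq, struct task_struct *p)
{
	/* Track a queued but not running fair task as a push candidate. */
	plist_del(&p->pushable_tasks, &rq->cfs.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->cfs.pushable_tasks);
}

static void remove_pushable_task(struct rq *rq, struct task_struct *p)
{
	/* The task is now running, migrating or sleeping: stop tracking it. */
	plist_del(&p->pushable_tasks, &rq->cfs.pushable_tasks);
}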
>
> a. doesn't seem to need any of the push functions, and b. doesn't seem to
> need any of the misfit functions. Maybe it's worth splitting the patch in 2.
In both cases we check whether there is a reason the task is not enqueued
on the right CPU, but I can split this patch in two anyway if that makes it
easier to review.
>
> >
> > [...]
> > +static inline bool task_misfit_cpu(struct task_struct *p, int cpu)
> > +{
> > + unsigned long max_capa = get_actual_cpu_capacity(cpu);
> > + unsigned long util = task_util_est(p);
> > +
> > + max_capa = min(max_capa, uclamp_eff_value(p, UCLAMP_MAX));
> > + util = max(util, task_runnable(p));
> > +
> > + /*
> > + * Return true only if the task might not sleep/wakeup because of a low
> > + * compute capacity. Tasks, which wake up regularly, will be handled by
> > + * feec().
> > + */
>
> NIT:
> On a little CPU with min_OPP=256 and max_OPP=512,
> a task with a util=100 and U_Max=10 will trigger this condition.
> However:
> - the task is already well placed from a power PoV
> - the task has opportunities to sleep/wake up
I agree. I took a wide condition to start with and plan to narrow it
step by step
> Shouldn't we ideally take:
>
> unsigned long max_capa;
> max_capa = max(min_capa(cpu), uclamp_eff_value(p, UCLAMP_MAX));
Fair enough, I will add it for the next version.
> max_capa = min(get_actual_cpu_capacity(cpu), max_capa);
>
> with min_capa(cpu) returning 256 in this case, i.e. the CPU capacity at the
> lowest OPP ?
>
> > + return (util > max_capa);
> > +}
> > +
>
> [...]