From: Tim C Chen <tim.c.chen@linux.intel.com>
On hybrid CPUs with cluster scheduling enabled, we will need to
consider balancing between the SMT CPU clusters and the Atom core clusters.
Below is such a hybrid x86 CPU with 4 big cores and 8 Atom cores.
Each scheduling cluster spans an L2 cache.
--L2--     --L2--     --L2--     --L2--     ----L2----      -----L2------
[0, 1]     [2, 3]     [4, 5]     [6, 7]     [8 9 10 11]     [12 13 14 15]
 Big        Big        Big        Big          Atom             Atom
 core       core       core       core        Module           Module
If the busiest group is a big core with both SMT CPUs busy, we should
do active load balancing when the destination group has idle CPU cores. This
condition is checked by asym_active_balance() during load balancing, but it is
not considered when looking for the busiest group and computing the load
imbalance. Add this consideration in find_busiest_group() and
calculate_imbalance().
In addition, update the logic that determines the busier group when one group
is SMT and the other group is non-SMT, and both groups are partially busy
with idle CPUs. The busier group should be the group with idle cores rather
than the group with one busy SMT CPU. We do not want to mark the SMT group
as the busiest one, pull the only task off the SMT CPU, and cause the whole
core to go empty.
Otherwise, suppose that while searching for the busiest group we first
encounter an SMT group with 1 task and set it as the busiest. If the
destination group is an Atom cluster with 1 task and we next encounter an
Atom cluster group with 3 tasks, we will not pick that Atom cluster over the
SMT group, even though we should. As a result, we do not load balance
the busier Atom cluster (with 3 tasks) towards the local Atom cluster
(with 1 task). Nor does it make sense to pick the 1-task SMT group
as the busier group, as we should not pull that task off the SMT core towards
the 1-task Atom cluster and leave the SMT core completely empty.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
kernel/sched/fair.c | 80 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 77 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 87317634fab2..f636d6c09dc6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8279,6 +8279,11 @@ enum group_type {
* more powerful CPU.
*/
group_misfit_task,
+ /*
+ * Balance SMT group that's fully busy. Can benefit from migrating
+ * a task on SMT with a busy sibling to another CPU on an idle core.
+ */
+ group_smt_balance,
/*
* SD_ASYM_PACKING only: One local CPU with higher capacity is available,
* and the task should be migrated to it instead of running on the
@@ -8987,6 +8992,7 @@ struct sg_lb_stats {
unsigned int group_weight;
enum group_type group_type;
unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
+ unsigned int group_smt_balance; /* Task on busy SMT should be moved */
unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
@@ -9260,6 +9266,9 @@ group_type group_classify(unsigned int imbalance_pct,
if (sgs->group_asym_packing)
return group_asym_packing;
+ if (sgs->group_smt_balance)
+ return group_smt_balance;
+
if (sgs->group_misfit_task_load)
return group_misfit_task;
@@ -9333,6 +9342,36 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs
return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
}
+/* One group has more than one SMT CPU while the other group does not */
+static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
+ struct sched_group *sg2)
+{
+ if (!sg1 || !sg2)
+ return false;
+
+ return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
+ (sg2->flags & SD_SHARE_CPUCAPACITY);
+}
+
+static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
+ struct sched_group *group)
+{
+ if (env->idle == CPU_NOT_IDLE)
+ return false;
+
+ /*
+ * For SMT source group, it is better to move a task
+ * to a CPU that doesn't have multiple tasks sharing its CPU capacity.
+ * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
+ * will not be on.
+ */
+ if (group->flags & SD_SHARE_CPUCAPACITY &&
+ sgs->sum_h_nr_running > 1)
+ return true;
+
+ return false;
+}
+
static inline bool
sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
{
@@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_asym_packing = 1;
}
+ /* Check for loaded SMT group to be balanced to dst CPU */
+ if (!local_group && smt_balance(env, sgs, group))
+ sgs->group_smt_balance = 1;
+
sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
/* Computing avg_load makes sense only when group is overloaded */
@@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
return false;
break;
+ case group_smt_balance:
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_has_spare:
+ /*
+ * Do not pick sg with SMT CPUs over sg with pure CPUs,
+ * as we do not want to pull task off SMT core with one task
+ * and make the core idle.
+ */
+ if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
+ if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
+ return false;
+ else
+ return true;
+ }
+
/*
* Select not overloaded group with lowest number of idle cpus
* and highest number of running tasks. We could also compare
@@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
case group_imbalanced:
case group_asym_packing:
+ case group_smt_balance:
/* Those types are not used in the slow wakeup path */
return false;
@@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
case group_imbalanced:
case group_asym_packing:
+ case group_smt_balance:
/* Those type are not used in the slow wakeup path */
return NULL;
@@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
return;
}
+ if (busiest->group_type == group_smt_balance) {
+ /* Reduce number of tasks sharing CPU capacity */
+ env->migration_type = migrate_task;
+ env->imbalance = 1;
+ return;
+ }
+
if (busiest->group_type == group_imbalanced) {
/*
* In the group_imb case we cannot rely on group-wide averages
@@ -10363,16 +10428,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
goto force_balance;
if (busiest->group_type != group_overloaded) {
- if (env->idle == CPU_NOT_IDLE)
+ if (env->idle == CPU_NOT_IDLE) {
/*
* If the busiest group is not overloaded (and as a
* result the local one too) but this CPU is already
* busy, let another idle CPU try to pull task.
*/
goto out_balanced;
+ }
+
+ if (busiest->group_type == group_smt_balance &&
+ smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
+ /* Let non SMT CPU pull from SMT CPU sharing with sibling */
+ goto force_balance;
+ }
if (busiest->group_weight > 1 &&
- local->idle_cpus <= (busiest->idle_cpus + 1))
+ local->idle_cpus <= (busiest->idle_cpus + 1)) {
/*
* If the busiest group is not overloaded
* and there is no imbalance between this and busiest
@@ -10383,12 +10455,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* there is more than 1 CPU per group.
*/
goto out_balanced;
+ }
- if (busiest->sum_h_nr_running == 1)
+ if (busiest->sum_h_nr_running == 1) {
/*
* busiest doesn't have any tasks waiting to run
*/
goto out_balanced;
+ }
}
force_balance:
--
2.32.0
On 7/8/23 4:27 AM, Tim Chen wrote:
> From: Tim C Chen <tim.c.chen@linux.intel.com>
>
Hi Tim. Sorry for the delayed response.
> On hybrid CPUs with scheduling cluster enabled, we will need to
> consider balancing between SMT CPU cluster, and Atom core cluster.
>
> Below shows such a hybrid x86 CPU with 4 big cores and 8 atom cores.
> Each scheduling cluster span a L2 cache.
>
> --L2-- --L2-- --L2-- --L2-- ----L2---- -----L2------
> [0, 1] [2, 3] [4, 5] [5, 6] [7 8 9 10] [11 12 13 14]
> Big Big Big Big Atom Atom
> core core core core Module Module
>
> If the busiest group is a big core with both SMT CPUs busy, we should
> active load balance if destination group has idle CPU cores. Such
> condition is considered by asym_active_balance() in load balancing but not
> considered when looking for busiest group and computing load imbalance.
> Add this consideration in find_busiest_group() and calculate_imbalance().
>
> In addition, update the logic determining the busier group when one group
> is SMT and the other group is non SMT but both groups are partially busy
> with idle CPU. The busier group should be the group with idle cores rather
> than the group with one busy SMT CPU. We do not want to make the SMT group
> the busiest one to pull the only task off SMT CPU and causing the whole core to
> go empty.
>
> Otherwise suppose in the search for the busiest group, we first encounter
> an SMT group with 1 task and set it as the busiest. The destination
> group is an atom cluster with 1 task and we next encounter an atom
> cluster group with 3 tasks, we will not pick this atom cluster over the
> SMT group, even though we should. As a result, we do not load balance
> the busier Atom cluster (with 3 tasks) towards the local atom cluster
> (with 1 task). And it doesn't make sense to pick the 1 task SMT group
> as the busier group as we also should not pull task off the SMT towards
> the 1 task atom cluster and make the SMT core completely empty.
>
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
> kernel/sched/fair.c | 80 +++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 77 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 87317634fab2..f636d6c09dc6 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8279,6 +8279,11 @@ enum group_type {
> * more powerful CPU.
> */
> group_misfit_task,
> + /*
> + * Balance SMT group that's fully busy. Can benefit from migration
> + * a task on SMT with busy sibling to another CPU on idle core.
> + */
> + group_smt_balance,
Could you please explain what group_smt_balance does differently? AFAIU it is doing the same
thing as group_fully_busy, but for the domain one level above the SMT domain, right?
> /*
> * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
> * and the task should be migrated to it instead of running on the
> @@ -8987,6 +8992,7 @@ struct sg_lb_stats {
> unsigned int group_weight;
> enum group_type group_type;
> unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
> + unsigned int group_smt_balance; /* Task on busy SMT be moved */
> unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
> #ifdef CONFIG_NUMA_BALANCING
> unsigned int nr_numa_running;
> @@ -9260,6 +9266,9 @@ group_type group_classify(unsigned int imbalance_pct,
> if (sgs->group_asym_packing)
> return group_asym_packing;
>
> + if (sgs->group_smt_balance)
> + return group_smt_balance;
> +
> if (sgs->group_misfit_task_load)
> return group_misfit_task;
>
> @@ -9333,6 +9342,36 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs
> return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
> }
>
> +/* One group has more than one SMT CPU while the other group does not */
> +static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
> + struct sched_group *sg2)
> +{
> + if (!sg1 || !sg2)
> + return false;
> +
> + return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
> + (sg2->flags & SD_SHARE_CPUCAPACITY);
> +}
> +
> +static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
> + struct sched_group *group)
> +{
> + if (env->idle == CPU_NOT_IDLE)
> + return false;
> +
> + /*
> + * For SMT source group, it is better to move a task
> + * to a CPU that doesn't have multiple tasks sharing its CPU capacity.
> + * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
> + * will not be on.
> + */
> + if (group->flags & SD_SHARE_CPUCAPACITY &&
> + sgs->sum_h_nr_running > 1)
> + return true;
> +
If we consider symmetric platforms which have SMT4, such as Power10,
we have a topology like below. Multiple such MCs will form the DIE (PKG) domain.
[0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
[--SMT--][--SMT--][----SMT---][---SMT----]
[--sg1--][--sg1--][---sg1----][---sg1----]
[--------------MC------------------------]
In case of SMT4, any group which has 2 or more tasks will be marked as
group_smt_balance. Previously, if that group had 2 or 3 tasks, it would have
been marked as group_has_spare. Since all the groups have SMT, that means the
behavior would be the same as fully busy, right? That can cause some
corner cases. No?
One example: let's say sg1 has 4 tasks and sg2 has 0 tasks and is trying to do
load balance. Previously the imbalance would have been 2; now the imbalance would be 1.
But in a subsequent load balance it would be balanced.
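For illustration, here is a tiny standalone sketch of that 4-vs-0 example (the figures are
the ones quoted above, not re-derived from the kernel code; the "old" value assumes the
has_spare path splits the task difference evenly, the "new" value is the env->imbalance = 1
that this patch sets for group_smt_balance):

	#include <stdio.h>

	int main(void)
	{
		unsigned int sg1_tasks = 4, sg2_tasks = 0;

		/* Old path (group_has_spare): split the difference, i.e. pull 2 tasks. */
		unsigned int old_imbalance = (sg1_tasks - sg2_tasks) / 2;

		/* New path (group_smt_balance): pull one task per balance pass. */
		unsigned int new_imbalance = 1;

		printf("old: pull %u tasks in one pass\n", old_imbalance);
		printf("new: pull %u task now, even out on later passes\n", new_imbalance);
		return 0;
	}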
> + return false;
> +}
> +
> static inline bool
> sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
> {
> @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> sgs->group_asym_packing = 1;
> }
>
> + /* Check for loaded SMT group to be balanced to dst CPU */
> + if (!local_group && smt_balance(env, sgs, group))
> + sgs->group_smt_balance = 1;
> +
> sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
>
> /* Computing avg_load makes sense only when group is overloaded */
> @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> return false;
> break;
>
> + case group_smt_balance:
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
> @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_has_spare:
> + /*
> + * Do not pick sg with SMT CPUs over sg with pure CPUs,
> + * as we do not want to pull task off SMT core with one task
> + * and make the core idle.
> + */
> + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> + return false;
> + else
> + return true;
> + }
> +
> /*
> * Select not overloaded group with lowest number of idle cpus
> * and highest number of running tasks. We could also compare
> @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
>
> case group_imbalanced:
> case group_asym_packing:
> + case group_smt_balance:
> /* Those types are not used in the slow wakeup path */
> return false;
>
> @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
>
> case group_imbalanced:
> case group_asym_packing:
> + case group_smt_balance:
> /* Those type are not used in the slow wakeup path */
> return NULL;
>
> @@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
> return;
> }
>
> + if (busiest->group_type == group_smt_balance) {
> + /* Reduce number of tasks sharing CPU capacity */
> + env->migration_type = migrate_task;
> + env->imbalance = 1;
> + return;
> + }
> +
> if (busiest->group_type == group_imbalanced) {
> /*
> * In the group_imb case we cannot rely on group-wide averages
> @@ -10363,16 +10428,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
> goto force_balance;
>
> if (busiest->group_type != group_overloaded) {
> - if (env->idle == CPU_NOT_IDLE)
> + if (env->idle == CPU_NOT_IDLE) {
> /*
> * If the busiest group is not overloaded (and as a
> * result the local one too) but this CPU is already
> * busy, let another idle CPU try to pull task.
> */
> goto out_balanced;
> + }
> +
> + if (busiest->group_type == group_smt_balance &&
> + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
> + /* Let non SMT CPU pull from SMT CPU sharing with sibling */
> + goto force_balance;
> + }
>
> if (busiest->group_weight > 1 &&
> - local->idle_cpus <= (busiest->idle_cpus + 1))
> + local->idle_cpus <= (busiest->idle_cpus + 1)) {
> /*
> * If the busiest group is not overloaded
> * and there is no imbalance between this and busiest
> @@ -10383,12 +10455,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
> * there is more than 1 CPU per group.
> */
> goto out_balanced;
> + }
>
> - if (busiest->sum_h_nr_running == 1)
> + if (busiest->sum_h_nr_running == 1) {
> /*
> * busiest doesn't have any tasks waiting to run
> */
> goto out_balanced;
> + }
> }
>
> force_balance:
On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>
>
> If we consider symmetric platforms which have SMT4 such as power10.
> we have a topology like below. multiple such MC will form DIE(PKG)
>
>
> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
> [--SMT--][--SMT--][----SMT---][---SMT----]
> [--sg1--][--sg1--][---sg1----][---sg1----]
> [--------------MC------------------------]
>
> In case of SMT4, if there is any group which has 2 or more tasks, that
> group will be marked as group_smt_balance. previously, if that group had 2
> or 3 tasks, it would have been marked as group_has_spare. Since all the groups have
> SMT that means behavior would be same fully busy right? That can cause some
> corner cases. No?
You raised a good point. I was looking from SMT2
perspective so group_smt_balance implies group_fully_busy.
That is no longer true for SMT4.
I am thinking of the following fix on the current patch
to take care of SMT4. Do you think this addresses
concerns from you and Tobias?
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 294a662c9410..3fc8d3a3bd22 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /* no idle cpus on both groups handled by group_fully_busy below */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
+ if (sgs->idle_cpus > busiest->idle_cpus)
+ return false;
+ if (sgs->idle_cpus < busiest->idle_cpus)
+ return true;
+ if (sgs->sum_nr_running <= busiest_sum_nr_running)
+ return false;
+ else
+ return true;
+ }
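To spell out the intended ordering, a standalone sketch with simplified stand-in
types (illustration only; toy_stats and toy_pick_smt_balance are not the kernel's
sg_lb_stats or update_sd_pick_busiest):

	#include <stdbool.h>

	struct toy_stats {
		unsigned int idle_cpus;
		unsigned int sum_nr_running;
	};

	/* Should candidate @sgs replace @busiest among group_smt_balance groups? */
	static bool toy_pick_smt_balance(const struct toy_stats *sgs,
					 const struct toy_stats *busiest)
	{
		/*
		 * Both groups fully busy: the real code falls to the
		 * group_fully_busy avg_load comparison; modelled here as
		 * keeping the current busiest.
		 */
		if (sgs->idle_cpus == 0 && busiest->idle_cpus == 0)
			return false;

		/* Fewer idle CPUs wins; on a tie, more running tasks wins. */
		if (sgs->idle_cpus != busiest->idle_cpus)
			return sgs->idle_cpus < busiest->idle_cpus;

		return sgs->sum_nr_running > busiest->sum_nr_running;
	}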
I will be on vacation next three weeks so my response will be slow.
Tim
>
> One example is Lets say sg1 has 4 tasks. and sg2 has 0 tasks and is trying to do
> load balance. Previously imbalance would have been 2, instead now imbalance would be 1.
> But in subsequent lb it would be balanced.
>
>
>
> > + return false;
> > +}
> > +
> > static inline bool
> > sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
> > {
> > @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> > sgs->group_asym_packing = 1;
> > }
> >
> > + /* Check for loaded SMT group to be balanced to dst CPU */
> > + if (!local_group && smt_balance(env, sgs, group))
> > + sgs->group_smt_balance = 1;
> > +
> > sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
> >
> > /* Computing avg_load makes sense only when group is overloaded */
> > @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > return false;
> > break;
> >
> > + case group_smt_balance:
> > case group_fully_busy:
> > /*
> > * Select the fully busy group with highest avg_load. In
> > @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > break;
> >
> > case group_has_spare:
> > + /*
> > + * Do not pick sg with SMT CPUs over sg with pure CPUs,
> > + * as we do not want to pull task off SMT core with one task
> > + * and make the core idle.
> > + */
> > + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> > + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> > + return false;
> > + else
> > + return true;
> > + }
> > +
> > /*
> > * Select not overloaded group with lowest number of idle cpus
> > * and highest number of running tasks. We could also compare
> > @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
> >
> > case group_imbalanced:
> > case group_asym_packing:
> > + case group_smt_balance:
> > /* Those types are not used in the slow wakeup path */
> > return false;
> >
> > @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
> >
> > case group_imbalanced:
> > case group_asym_packing:
> > + case group_smt_balance:
> > /* Those type are not used in the slow wakeup path */
> > return NULL;
> >
> > @@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
> > return;
> > }
> >
> > + if (busiest->group_type == group_smt_balance) {
> > + /* Reduce number of tasks sharing CPU capacity */
> > + env->migration_type = migrate_task;
> > + env->imbalance = 1;
> > + return;
> > + }
> > +
> > if (busiest->group_type == group_imbalanced) {
> > /*
> > * In the group_imb case we cannot rely on group-wide averages
> > @@ -10363,16 +10428,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
> > goto force_balance;
> >
> > if (busiest->group_type != group_overloaded) {
> > - if (env->idle == CPU_NOT_IDLE)
> > + if (env->idle == CPU_NOT_IDLE) {
> > /*
> > * If the busiest group is not overloaded (and as a
> > * result the local one too) but this CPU is already
> > * busy, let another idle CPU try to pull task.
> > */
> > goto out_balanced;
> > + }
> > +
> > + if (busiest->group_type == group_smt_balance &&
> > + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
> > + /* Let non SMT CPU pull from SMT CPU sharing with sibling */
> > + goto force_balance;
> > + }
> >
> > if (busiest->group_weight > 1 &&
> > - local->idle_cpus <= (busiest->idle_cpus + 1))
> > + local->idle_cpus <= (busiest->idle_cpus + 1)) {
> > /*
> > * If the busiest group is not overloaded
> > * and there is no imbalance between this and busiest
> > @@ -10383,12 +10455,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
> > * there is more than 1 CPU per group.
> > */
> > goto out_balanced;
> > + }
> >
> > - if (busiest->sum_h_nr_running == 1)
> > + if (busiest->sum_h_nr_running == 1) {
> > /*
> > * busiest doesn't have any tasks waiting to run
> > */
> > goto out_balanced;
> > + }
> > }
> >
> > force_balance:
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 294a662c9410..3fc8d3a3bd22 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest_sum_nr_running)
typo: should be busiest->sum_nr_running
> + return false;
> + else
> + return true;
> + }
>
Tim
On 7/15/23 4:35 AM, Tim Chen wrote:
> On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>
>>
>>
>> If we consider symmetric platforms which have SMT4 such as power10.
>> we have a topology like below. multiple such MC will form DIE(PKG)
>>
>>
>> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
>> [--SMT--][--SMT--][----SMT---][---SMT----]
>> [--sg1--][--sg1--][---sg1----][---sg1----]
>> [--------------MC------------------------]
>>
>> In case of SMT4, if there is any group which has 2 or more tasks, that
>> group will be marked as group_smt_balance. previously, if that group had 2
>> or 3 tasks, it would have been marked as group_has_spare. Since all the groups have
>> SMT that means behavior would be same fully busy right? That can cause some
>> corner cases. No?
>
> You raised a good point. I was looking from SMT2
> perspective so group_smt_balance implies group_fully_busy.
> That is no longer true for SMT4.
>
> I am thinking of the following fix on the current patch
> to take care of SMT4. Do you think this addresses
Thanks, Tim, for taking a look at it again.
Yes, I think this would address some of the corner cases.
Any SMT4 group having 2, 3 or 4 tasks will have smt_balance as the group type, and the busiest
one is the one which has the least number of idle CPUs (same conditions as group_has_spare).
> concerns from you and Tobias?
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 294a662c9410..3fc8d3a3bd22 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest_sum_nr_running)
> + return false;
> + else
> + return true;
> + }
>
>
> I will be on vacation next three weeks so my response will be slow.
>
> Tim
>
>>
A small suggestion on the above code to avoid the compiler warning about the switch case falling
through; the else branch can also be removed, since update_sd_pick_busiest() returns true by default.
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e5a75c76bcaa..ae364ac6f22e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9728,9 +9728,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
return true;
if (sgs->sum_nr_running <= busiest->sum_nr_running)
return false;
- else
- return true;
}
+ break;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
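(For context on the warning being avoided here: the kernel is built with -Wimplicit-fallthrough,
so a non-empty case that reaches its end without break, return, goto or the fallthrough
pseudo-keyword triggers a diagnostic. A minimal illustration outside the scheduler code, with
made-up names:)

	int classify(int type)
	{
		int x = 0;

		switch (type) {
		case 1:
			x++;
			/* falling into case 2 here would trigger -Wimplicit-fallthrough;  */
			/* either a break (as suggested above) or fallthrough; silences it */
			break;
		case 2:
			x--;
			break;
		}
		return x;
	}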
>> One example is Lets say sg1 has 4 tasks. and sg2 has 0 tasks and is trying to do
>> load balance. Previously imbalance would have been 2, instead now imbalance would be 1.
>> But in subsequent lb it would be balanced.
>>
>>
>>
>>> + return false;
>>> +}
>>> +
>>> static inline bool
>>> sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
>>> {
>>> @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>>> sgs->group_asym_packing = 1;
>>> }
>>>
>>> + /* Check for loaded SMT group to be balanced to dst CPU */
>>> + if (!local_group && smt_balance(env, sgs, group))
>>> + sgs->group_smt_balance = 1;
>>> +
>>> sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
>>>
>>> /* Computing avg_load makes sense only when group is overloaded */
>>> @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> return false;
>>> break;
>>>
>>> + case group_smt_balance:
>>> case group_fully_busy:
>>> /*
>>> * Select the fully busy group with highest avg_load. In
>>> @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> break;
>>>
>>> case group_has_spare:
>>> + /*
>>> + * Do not pick sg with SMT CPUs over sg with pure CPUs,
>>> + * as we do not want to pull task off SMT core with one task
>>> + * and make the core idle.
>>> + */
>>> + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
>>> + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
>>> + return false;
>>> + else
>>> + return true;
>>> + }
>>> +
>>> /*
>>> * Select not overloaded group with lowest number of idle cpus
>>> * and highest number of running tasks. We could also compare
>>> @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
>>>
>>> case group_imbalanced:
>>> case group_asym_packing:
>>> + case group_smt_balance:
>>> /* Those types are not used in the slow wakeup path */
>>> return false;
>>>
>>> @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
>>>
>>> case group_imbalanced:
>>> case group_asym_packing:
>>> + case group_smt_balance:
>>> /* Those type are not used in the slow wakeup path */
>>> return NULL;
>>>
>>> @@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
>>> return;
>>> }
>>>
>>> + if (busiest->group_type == group_smt_balance) {
>>> + /* Reduce number of tasks sharing CPU capacity */
>>> + env->migration_type = migrate_task;
>>> + env->imbalance = 1;
>>> + return;
>>> + }
>>> +
>>> if (busiest->group_type == group_imbalanced) {
>>> /*
>>> * In the group_imb case we cannot rely on group-wide averages
>>> @@ -10363,16 +10428,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
>>> goto force_balance;
>>>
>>> if (busiest->group_type != group_overloaded) {
>>> - if (env->idle == CPU_NOT_IDLE)
>>> + if (env->idle == CPU_NOT_IDLE) {
>>> /*
>>> * If the busiest group is not overloaded (and as a
>>> * result the local one too) but this CPU is already
>>> * busy, let another idle CPU try to pull task.
>>> */
>>> goto out_balanced;
>>> + }
>>> +
>>> + if (busiest->group_type == group_smt_balance &&
>>> + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
>>> + /* Let non SMT CPU pull from SMT CPU sharing with sibling */
>>> + goto force_balance;
>>> + }
>>>
>>> if (busiest->group_weight > 1 &&
>>> - local->idle_cpus <= (busiest->idle_cpus + 1))
>>> + local->idle_cpus <= (busiest->idle_cpus + 1)) {
>>> /*
>>> * If the busiest group is not overloaded
>>> * and there is no imbalance between this and busiest
>>> @@ -10383,12 +10455,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
>>> * there is more than 1 CPU per group.
>>> */
>>> goto out_balanced;
>>> + }
>>>
>>> - if (busiest->sum_h_nr_running == 1)
>>> + if (busiest->sum_h_nr_running == 1) {
>>> /*
>>> * busiest doesn't have any tasks waiting to run
>>> */
>>> goto out_balanced;
>>> + }
>>> }
>>>
>>> force_balance:
>
On Mon, Jul 17, 2023 at 01:06:59AM +0530, Shrikanth Hegde wrote:
>
>
> On 7/15/23 4:35 AM, Tim Chen wrote:
> > On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
> >
> >>
> >>
> >> If we consider symmetric platforms which have SMT4 such as power10.
> >> we have a topology like below. multiple such MC will form DIE(PKG)
> >>
> >>
> >> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
> >> [--SMT--][--SMT--][----SMT---][---SMT----]
> >> [--sg1--][--sg1--][---sg1----][---sg1----]
> >> [--------------MC------------------------]
> >>
> >> In case of SMT4, if there is any group which has 2 or more tasks, that
> >> group will be marked as group_smt_balance. previously, if that group had 2
> >> or 3 tasks, it would have been marked as group_has_spare. Since all the groups have
> >> SMT that means behavior would be same fully busy right? That can cause some
> >> corner cases. No?
> >
> > You raised a good point. I was looking from SMT2
> > perspective so group_smt_balance implies group_fully_busy.
> > That is no longer true for SMT4.
> >
> > I am thinking of the following fix on the current patch
> > to take care of SMT4. Do you think this addresses
>
> Thanks Tim for taking a look at it again.
>
> Yes. I think this would address some of the corner cases.
> Any SMT4 group having 2,3,4 will have smt_balance as the group type, and busiest one
> is the one which has least number of idle cpu's. (same conditions as group_has_spare)
>
>
>
>
> > concerns from you and Tobias?
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 294a662c9410..3fc8d3a3bd22 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > break;
> >
> > case group_smt_balance:
> > + /* no idle cpus on both groups handled by group_fully_busy below */
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > + if (sgs->idle_cpus > busiest->idle_cpus)
> > + return false;
> > + if (sgs->idle_cpus < busiest->idle_cpus)
> > + return true;
> > + if (sgs->sum_nr_running <= busiest_sum_nr_running)
> > + return false;
> > + else
> > + return true;
> > + }
> >
> >
> > I will be on vacation next three weeks so my response will be slow.
> >
> > Tim
> >
> >>
>
> Small suggestion to above code to avoid compiler warning of switch case falling
> through and else case can be removed, since update_sd_pick_busiest by default returns true.
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index e5a75c76bcaa..ae364ac6f22e 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9728,9 +9728,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> return true;
> if (sgs->sum_nr_running <= busiest->sum_nr_running)
> return false;
> - else
> - return true;
> }
> + break;
> +
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
>
>
Can someone please send a full patch for this? I've already queued Tim's
patches in tip/sched/core (tip-bot seems to have died somewhere last
week, it's being worked on).
On 7/17/23 4:40 PM, Peter Zijlstra wrote:
> On Mon, Jul 17, 2023 at 01:06:59AM +0530, Shrikanth Hegde wrote:
>>
>>
>> On 7/15/23 4:35 AM, Tim Chen wrote:
>>> On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>>>
>>>>
>>>>
>>>> If we consider symmetric platforms which have SMT4 such as power10.
>>>> we have a topology like below. multiple such MC will form DIE(PKG)
>>>>
>>>>
>>>> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
>>>> [--SMT--][--SMT--][----SMT---][---SMT----]
>>>> [--sg1--][--sg1--][---sg1----][---sg1----]
>>>> [--------------MC------------------------]
>>>>
>>>> In case of SMT4, if there is any group which has 2 or more tasks, that
>>>> group will be marked as group_smt_balance. previously, if that group had 2
>>>> or 3 tasks, it would have been marked as group_has_spare. Since all the groups have
>>>> SMT that means behavior would be same fully busy right? That can cause some
>>>> corner cases. No?
>>>
>>> You raised a good point. I was looking from SMT2
>>> perspective so group_smt_balance implies group_fully_busy.
>>> That is no longer true for SMT4.
>>>
>>> I am thinking of the following fix on the current patch
>>> to take care of SMT4. Do you think this addresses
>>
>> Thanks Tim for taking a look at it again.
>>
>> Yes. I think this would address some of the corner cases.
>> Any SMT4 group having 2,3,4 will have smt_balance as the group type, and busiest one
>> is the one which has least number of idle cpu's. (same conditions as group_has_spare)
>>
>>
>>
>>
>>> concerns from you and Tobias?
>>>
>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>> index 294a662c9410..3fc8d3a3bd22 100644
>>> --- a/kernel/sched/fair.c
>>> +++ b/kernel/sched/fair.c
>>> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> break;
>>>
>>> case group_smt_balance:
>>> + /* no idle cpus on both groups handled by group_fully_busy below */
>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>> + return false;
>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>> + return true;
>>> + if (sgs->sum_nr_running <= busiest_sum_nr_running)
>>> + return false;
>>> + else
>>> + return true;
>>> + }
>>>
>>>
>>> I will be on vacation next three weeks so my response will be slow.
>>>
>>> Tim
>>>
>>>>
>>
>> Small suggestion to above code to avoid compiler warning of switch case falling
>> through and else case can be removed, since update_sd_pick_busiest by default returns true.
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index e5a75c76bcaa..ae364ac6f22e 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -9728,9 +9728,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>> return true;
>> if (sgs->sum_nr_running <= busiest->sum_nr_running)
>> return false;
>> - else
>> - return true;
>> }
>> + break;
>> +
>> case group_fully_busy:
>> /*
>> * Select the fully busy group with highest avg_load. In
>>
>>
>
> Can someone please send a full patch for this? I've already queued Tim's
> patches in tip/sched/core (tip-bot seems to have died somewhere last
> week, it's being worked on).
Hi Peter.
Sending this on behalf of Tim. I have included my suggestion as well; hope that's okay.
Please find the patch below as it stands now. It includes the couple of changes that were discussed (in 1/6 and in 3/6).
---
kernel/sched/fair.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 932e7b78894a..9502013abe33 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /* no idle cpus on both groups handled by group_fully_busy below */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
+ if (sgs->idle_cpus > busiest->idle_cpus)
+ return false;
+ if (sgs->idle_cpus < busiest->idle_cpus)
+ return true;
+ if (sgs->sum_nr_running <= busiest->sum_nr_running)
+ return false;
+ }
+ break;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
--
2.31.1
On Mon, Jul 17, 2023 at 05:48:02PM +0530, Shrikanth Hegde wrote:
> Hi Peter.
>
> Sending on behalf of tim. I have included my suggestion as well. Hope
> that's ok. Please find below the patch as of now. it includes the
> couple of changes that are discussed. (in 1/6 and in 3/6)
Could you please add a Changelog and SoB thingies such that I can apply
the thing?
Given Tim is on holidays, perhaps do something like:
Originally-by: Tim Chen <...>
After all, you did some changes and verified it actually works etc..
> ---
> kernel/sched/fair.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 932e7b78894a..9502013abe33 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> imbalance /= ncores_local + ncores_busiest;
>
> /* Take advantage of resource in an empty sched group */
> - if (imbalance == 0 && local->sum_nr_running == 0 &&
> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> busiest->sum_nr_running > 1)
> imbalance = 2;
>
> @@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> + return false;
> + }
> + break;
> +
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
> --
> 2.31.1
From: Tim Chen <tim.c.chen@linux.intel.com>
For SMT4, any group with 2 or more tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy)
when the local is fully idle and busy group has more than 2 tasks.
Local group should try to pull at least 1 task in this case.
Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
---
kernel/sched/fair.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 932e7b78894a..9502013abe33 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /* no idle cpus on both groups handled by group_fully_busy below */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
+ if (sgs->idle_cpus > busiest->idle_cpus)
+ return false;
+ if (sgs->idle_cpus < busiest->idle_cpus)
+ return true;
+ if (sgs->sum_nr_running <= busiest->sum_nr_running)
+ return false;
+ }
+ break;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
--
2.31.1
On Mon, 2023-07-17 at 20:28 +0530, Shrikanth Hegde wrote:
> From: Tim Chen <tim.c.chen@linux.intel.com>
>
> For SMT4, any group with more than 2 tasks will be marked as
> group_smt_balance. Retain the behaviour of group_has_spare by marking
> the busiest group as the group which has the least number of idle_cpus.
>
> Also, handle rounding effect of adding (ncores_local + ncores_busy)
> when the local is fully idle and busy group has more than 2 tasks.
> Local group should try to pull at least 1 task in this case.
>
> Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
> ---
> kernel/sched/fair.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 932e7b78894a..9502013abe33 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> imbalance /= ncores_local + ncores_busiest;
>
> /* Take advantage of resource in an empty sched group */
> - if (imbalance == 0 && local->sum_nr_running == 0 &&
> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> busiest->sum_nr_running > 1)
> imbalance = 2;
>
> @@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> + return false;
> + }
> + break;
> +
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
Thanks for the fix up patch.
Acked-by: Tim Chen <tim.c.chen@linux.intel.com>
On Wed, 2023-07-26 at 20:11 -0700, Tim Chen wrote:
> On Mon, 2023-07-17 at 20:28 +0530, Shrikanth Hegde wrote:
> > From: Tim Chen <tim.c.chen@linux.intel.com>
> >
> > For SMT4, any group with more than 2 tasks will be marked as
> > group_smt_balance. Retain the behaviour of group_has_spare by marking
> > the busiest group as the group which has the least number of idle_cpus.
> >
> > Also, handle rounding effect of adding (ncores_local + ncores_busy)
> > when the local is fully idle and busy group has more than 2 tasks.
> > Local group should try to pull at least 1 task in this case.
> >
> > Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
> > Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> > Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
> > ---
> > kernel/sched/fair.c | 13 ++++++++++++-
> > 1 file changed, 12 insertions(+), 1 deletion(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 932e7b78894a..9502013abe33 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> > imbalance /= ncores_local + ncores_busiest;
> >
> > /* Take advantage of resource in an empty sched group */
> > - if (imbalance == 0 && local->sum_nr_running == 0 &&
> > + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> > busiest->sum_nr_running > 1)
> > imbalance = 2;
> >
> > @@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > break;
> >
> > case group_smt_balance:
> > + /* no idle cpus on both groups handled by group_fully_busy below */
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > + if (sgs->idle_cpus > busiest->idle_cpus)
> > + return false;
> > + if (sgs->idle_cpus < busiest->idle_cpus)
> > + return true;
> > + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > + return false;
> > + }
> > + break;
Shrikanth and Peter,
Sorry, I acked Shrikanth's fixup patch too quickly without seeing that Shrikanth added
a "break" in the patch above. My original code did not have that break statement as
I did intend the code to fall through to the "group_fully_busy" code path when
there are no idle CPUs in both groups. To make the compiler happy and to put
in the correct logic, I refreshed the patch as below.
Thanks.
Tim
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 14 Jul 2023 16:09:30 -0700
Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
For SMT4, any group with 2 or more tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy)
when the local is fully idle and busy group has more than 2 tasks.
Local group should try to pull at least 1 task in this case.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
kernel/sched/fair.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a87988327f88..566686c5f2bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /* no idle cpus on both groups handled by group_fully_busy below */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
+ if (sgs->idle_cpus > busiest->idle_cpus)
+ return false;
+ if (sgs->idle_cpus < busiest->idle_cpus)
+ return true;
+ if (sgs->sum_nr_running <= busiest->sum_nr_running)
+ return false;
+ else
+ return true;
+ }
+ goto fully_busy;
+ break;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* select the 1st one, except if @sg is composed of SMT
* siblings.
*/
-
+fully_busy:
if (sgs->avg_load < busiest->avg_load)
return false;
--
2.32.0
On Thu, Jul 27, 2023 at 06:32:44AM -0700, Tim Chen wrote:
> kernel/sched/fair.c | 18 ++++++++++++++++--
> 1 file changed, 16 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a87988327f88..566686c5f2bd 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> imbalance /= ncores_local + ncores_busiest;
>
> /* Take advantage of resource in an empty sched group */
> - if (imbalance == 0 && local->sum_nr_running == 0 &&
> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> busiest->sum_nr_running > 1)
> imbalance = 2;
>
> @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> + return false;
> + else
> + return true;
> + }
> + goto fully_busy;
> + break;
This is really daft; why can't this simply be: fallthrough; ? At the
very least that break must go.
> +
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
> @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> * select the 1st one, except if @sg is composed of SMT
> * siblings.
> */
> -
> +fully_busy:
> if (sgs->avg_load < busiest->avg_load)
> return false;
>
> --
> 2.32.0
>
>
On Tue, 2023-09-05 at 12:41 +0200, Peter Zijlstra wrote:
> On Thu, Jul 27, 2023 at 06:32:44AM -0700, Tim Chen wrote:
>
> > kernel/sched/fair.c | 18 ++++++++++++++++--
> > 1 file changed, 16 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index a87988327f88..566686c5f2bd 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> > imbalance /= ncores_local + ncores_busiest;
> >
> > /* Take advantage of resource in an empty sched group */
> > - if (imbalance == 0 && local->sum_nr_running == 0 &&
> > + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> > busiest->sum_nr_running > 1)
> > imbalance = 2;
> >
> > @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > break;
> >
> > case group_smt_balance:
> > + /* no idle cpus on both groups handled by group_fully_busy below */
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > + if (sgs->idle_cpus > busiest->idle_cpus)
> > + return false;
> > + if (sgs->idle_cpus < busiest->idle_cpus)
> > + return true;
> > + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > + return false;
> > + else
> > + return true;
> > + }
> > + goto fully_busy;
> > + break;
>
> This is really daft; why can't this simply be: fallthrough; ? At the
> very least that break must go.
>
>
Yes, the break should go. I was adding the goto to prevent the compiler
from complaining about fall-through code. The break is no longer needed.
Tim
From 81971a0b1eb64059756f00d8497b1865af2c0792 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: Fri, 14 Jul 2023 16:09:30 -0700
Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
For SMT4, any group with 2 or more tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy)
when the local is fully idle and busy group has more than 2 tasks.
Local group should try to pull at least 1 task in this case.
Fixes: fee1759e4f04 ("sched/fair: Determine active load balance for SMT sched groups")
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
kernel/sched/fair.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b7445cd5af9..6e7ee2efc1ba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9575,7 +9575,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9763,6 +9763,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /* no idle cpus on both groups handled by group_fully_busy below */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
+ if (sgs->idle_cpus > busiest->idle_cpus)
+ return false;
+ if (sgs->idle_cpus < busiest->idle_cpus)
+ return true;
+ if (sgs->sum_nr_running <= busiest->sum_nr_running)
+ return false;
+ else
+ return true;
+ }
+ goto fully_busy;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9775,7 +9788,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* select the 1st one, except if @sg is composed of SMT
* siblings.
*/
-
+fully_busy:
if (sgs->avg_load < busiest->avg_load)
return false;
--
2.32.0
On Tue, Sep 05, 2023 at 10:54:09AM -0700, Tim Chen wrote:
> > > +		goto fully_busy;
> > > +		break;
> >
> > This is really daft; why can't this simply be: fallthrough; ? At the
> > very least that break must go.
> >
> Yes, the break should go. I was adding the goto to prevent compiler
> from complaining about fall through code.

But that's what we have the fallthrough keyword for, no?
On Wed, 2023-09-06 at 10:23 +0200, Peter Zijlstra wrote:
> On Tue, Sep 05, 2023 at 10:54:09AM -0700, Tim Chen wrote:
> > > > +		goto fully_busy;
> > > > +		break;
> > >
> > > This is really daft; why can't this simply be: fallthrough; ? At the
> > > very least that break must go.
> > >
> > Yes, the break should go. I was adding the goto to prevent compiler
> > from complaining about fall through code.
>
> But that's what we have the fallthrough keyword for, no?

Okay. Will update patch to use fallthrough once Shrikanth has a chance
to test the update to use has_spare path for SMT4.

Tim
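For reference, a sketch of how the group_smt_balance case could then read with the
fallthrough pseudo-keyword in place of the goto/break pair (a sketch only; whether the
spare-CPU branch ends up reusing the group_has_spare comparisons, as mentioned above,
is not shown here):

	case group_smt_balance:
		/* no idle cpus on both groups handled by group_fully_busy below */
		if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
			if (sgs->idle_cpus > busiest->idle_cpus)
				return false;
			if (sgs->idle_cpus < busiest->idle_cpus)
				return true;
			return sgs->sum_nr_running > busiest->sum_nr_running;
		}
		fallthrough;

	case group_fully_busy:
		/* existing avg_load comparison continues unchanged */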
On 7/27/23 7:02 PM, Tim Chen wrote:
> On Wed, 2023-07-26 at 20:11 -0700, Tim Chen wrote:
>> On Mon, 2023-07-17 at 20:28 +0530, Shrikanth Hegde wrote:
>>> From: Tim Chen <tim.c.chen@linux.intel.com>
>>>
>>> For SMT4, any group with more than 2 tasks will be marked as
>>> group_smt_balance. Retain the behaviour of group_has_spare by marking
>>> the busiest group as the group which has the least number of idle_cpus.
>>>
>>> Also, handle rounding effect of adding (ncores_local + ncores_busy)
>>> when the local is fully idle and busy group has more than 2 tasks.
>>> Local group should try to pull at least 1 task in this case.
>>>
>>> Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
>>> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
>>> Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
>>> ---
>>> kernel/sched/fair.c | 13 ++++++++++++-
>>> 1 file changed, 12 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>> index 932e7b78894a..9502013abe33 100644
>>> --- a/kernel/sched/fair.c
>>> +++ b/kernel/sched/fair.c
>>> @@ -9532,7 +9532,7 @@ static inline long sibling_imbalance(struct lb_env *env,
>>> imbalance /= ncores_local + ncores_busiest;
>>>
>>> /* Take advantage of resource in an empty sched group */
>>> - if (imbalance == 0 && local->sum_nr_running == 0 &&
>>> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
>>> busiest->sum_nr_running > 1)
>>> imbalance = 2;
>>>
>>> @@ -9720,6 +9720,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> break;
>>>
>>> case group_smt_balance:
>>> + /* no idle cpus on both groups handled by group_fully_busy below */
>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>> + return false;
>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>> + return true;
>>> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
>>> + return false;
>>> + }
>>> + break;
>
> Shrikanth and Peter,
>
> Sorry, I acked Shrikanth's fixup patch too quickly without seeing that Shrikanth added
> a "break" in the patch above. My original code did not have that break statement as
> I did intend the code to fall through to the "group_fully_busy" code path when
> there are no idle cpus in both groups. To make the compiler happy and putting
> in the correct logic, I refresh the patch as below.
>
> Thanks.
>
> Tim
>
> From: Tim Chen <tim.c.chen@linux.intel.com>
> Date: Fri, 14 Jul 2023 16:09:30 -0700
> Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
>
> For SMT4, any group with more than 2 tasks will be marked as
> group_smt_balance. Retain the behaviour of group_has_spare by marking
> the busiest group as the group which has the least number of idle_cpus.
>
> Also, handle rounding effect of adding (ncores_local + ncores_busy)
> when the local is fully idle and busy group has more than 2 tasks.
> Local group should try to pull at least 1 task in this case.
>
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
> kernel/sched/fair.c | 18 ++++++++++++++++--
> 1 file changed, 16 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a87988327f88..566686c5f2bd 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> imbalance /= ncores_local + ncores_busiest;
>
> /* Take advantage of resource in an empty sched group */
> - if (imbalance == 0 && local->sum_nr_running == 0 &&
> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> busiest->sum_nr_running > 1)
> imbalance = 2;
>
> @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> + return false;
> + else
> + return true;
> + }
> + goto fully_busy;
> + break;
> +
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
> @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> * select the 1st one, except if @sg is composed of SMT
> * siblings.
> */
> -
> +fully_busy:
> if (sgs->avg_load < busiest->avg_load)
> return false;
>
Hi Tim, Peter.
The group_smt_balance (cluster scheduling) patches are in tip/sched/core, but I don't
see the above patch there yet. As is, this can cause a functional difference
on SMT4 systems (such as Power10).
Can we please have the above patch in tip/sched/core as well?
Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
On Mon, 2023-08-07 at 15:06 +0530, Shrikanth Hegde wrote:
> >
> > From: Tim Chen <tim.c.chen@linux.intel.com>
> > Date: Fri, 14 Jul 2023 16:09:30 -0700
> > Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
> >
> > For SMT4, any group with more than 2 tasks will be marked as
> > group_smt_balance. Retain the behaviour of group_has_spare by marking
> > the busiest group as the group which has the least number of idle_cpus.
> >
> > Also, handle rounding effect of adding (ncores_local + ncores_busy)
> > when the local is fully idle and busy group has more than 2 tasks.
> > Local group should try to pull at least 1 task in this case.
> >
> > Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> > ---
> > kernel/sched/fair.c | 18 ++++++++++++++++--
> > 1 file changed, 16 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index a87988327f88..566686c5f2bd 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> > imbalance /= ncores_local + ncores_busiest;
> >
> > /* Take advantage of resource in an empty sched group */
> > - if (imbalance == 0 && local->sum_nr_running == 0 &&
> > + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> > busiest->sum_nr_running > 1)
> > imbalance = 2;
> >
> > @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > break;
> >
> > case group_smt_balance:
> > + /* no idle cpus on both groups handled by group_fully_busy below */
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > + if (sgs->idle_cpus > busiest->idle_cpus)
> > + return false;
> > + if (sgs->idle_cpus < busiest->idle_cpus)
> > + return true;
> > + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > + return false;
> > + else
> > + return true;
> > + }
> > + goto fully_busy;
> > + break;
> > +
> > case group_fully_busy:
> > /*
> > * Select the fully busy group with highest avg_load. In
> > @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > * select the 1st one, except if @sg is composed of SMT
> > * siblings.
> > */
> > -
> > +fully_busy:
> > if (sgs->avg_load < busiest->avg_load)
> > return false;
> >
>
> Hi Tim, Peter.
>
> group_smt_balance(cluster scheduling), patches are in tip/sched/core. I dont
> see this above patch there yet. Currently as is, this can cause function difference
> in SMT4 systems( such as Power10).
>
> Can we please have the above patch as well in tip/sched/core?
>
> Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Hi Peter,
Just back from my long vacation. Wonder if you have any comments on the above patch
for fixing the SMT4 case?
Tim
On 8/22/23 12:49 AM, Tim Chen wrote:
> On Mon, 2023-08-07 at 15:06 +0530, Shrikanth Hegde wrote:
>>>
>>> From: Tim Chen <tim.c.chen@linux.intel.com>
>>> Date: Fri, 14 Jul 2023 16:09:30 -0700
>>> Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
>>>
>>> For SMT4, any group with more than 2 tasks will be marked as
>>> group_smt_balance. Retain the behaviour of group_has_spare by marking
>>> the busiest group as the group which has the least number of idle_cpus.
>>>
>>> Also, handle rounding effect of adding (ncores_local + ncores_busy)
>>> when the local is fully idle and busy group has more than 2 tasks.
>>> Local group should try to pull at least 1 task in this case.
>>>
>>> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
>>> ---
>>> kernel/sched/fair.c | 18 ++++++++++++++++--
>>> 1 file changed, 16 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>> index a87988327f88..566686c5f2bd 100644
>>> --- a/kernel/sched/fair.c
>>> +++ b/kernel/sched/fair.c
>>> @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
>>> imbalance /= ncores_local + ncores_busiest;
>>>
>>> /* Take advantage of resource in an empty sched group */
>>> - if (imbalance == 0 && local->sum_nr_running == 0 &&
>>> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
>>> busiest->sum_nr_running > 1)
>>> imbalance = 2;
>>>
>>> @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> break;
>>>
>>> case group_smt_balance:
>>> + /* no idle cpus on both groups handled by group_fully_busy below */
>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>> + return false;
>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>> + return true;
>>> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
>>> + return false;
>>> + else
>>> + return true;
>>> + }
>>> + goto fully_busy;
>>> + break;
>>> +
>>> case group_fully_busy:
>>> /*
>>> * Select the fully busy group with highest avg_load. In
>>> @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>> * select the 1st one, except if @sg is composed of SMT
>>> * siblings.
>>> */
>>> -
>>> +fully_busy:
>>> if (sgs->avg_load < busiest->avg_load)
>>> return false;
>>>
>>
>> Hi Tim, Peter.
>>
>> group_smt_balance(cluster scheduling), patches are in tip/sched/core. I dont
>> see this above patch there yet. Currently as is, this can cause function difference
>> in SMT4 systems( such as Power10).
>>
>> Can we please have the above patch as well in tip/sched/core?
>>
>> Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
>
> Hi Peter,
>
> Just back from my long vacation. Wonder if you have any comments on the above patch
> for fixing the SMT4 case?
>
> Tim
Hi Tim, Peter.
Are there any concerns with the above patch for fixing the SMT4 case?
Currently group_smt_balance is set even when there are only 2 tasks on an
SMT4 core; ideally the busiest-group selection should then behave the same as group_has_spare.
The above patch copies that behavior over to group_smt_balance.
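As a stand-alone sketch (not the actual kernel code; plain stand-in parameters instead
of the sg_lb_stats fields), the pick order the patch applies for group_smt_balance when
either group still has idle CPUs mirrors the group_has_spare ordering:

#include <stdbool.h>

bool sg_busier_than_busiest(unsigned int sg_idle, unsigned int sg_running,
			    unsigned int busiest_idle, unsigned int busiest_running)
{
	/* fewer idle CPUs marks a group busier */
	if (sg_idle > busiest_idle)
		return false;
	if (sg_idle < busiest_idle)
		return true;
	/* tie on idle CPUs: more running tasks is busier */
	return sg_running > busiest_running;
}

The case where neither group has an idle CPU is left to the existing group_fully_busy
handling.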
>
On Tue, 2023-09-05 at 13:33 +0530, Shrikanth Hegde wrote:
>
> On 8/22/23 12:49 AM, Tim Chen wrote:
> > On Mon, 2023-08-07 at 15:06 +0530, Shrikanth Hegde wrote:
> > > >
> > > > From: Tim Chen <tim.c.chen@linux.intel.com>
> > > > Date: Fri, 14 Jul 2023 16:09:30 -0700
> > > > Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
> > > >
> > > > For SMT4, any group with more than 2 tasks will be marked as
> > > > group_smt_balance. Retain the behaviour of group_has_spare by marking
> > > > the busiest group as the group which has the least number of idle_cpus.
> > > >
> > > > Also, handle rounding effect of adding (ncores_local + ncores_busy)
> > > > when the local is fully idle and busy group has more than 2 tasks.
> > > > Local group should try to pull at least 1 task in this case.
> > > >
> > > > Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> > > > ---
> > > > kernel/sched/fair.c | 18 ++++++++++++++++--
> > > > 1 file changed, 16 insertions(+), 2 deletions(-)
> > > >
> > > > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > > > index a87988327f88..566686c5f2bd 100644
> > > > --- a/kernel/sched/fair.c
> > > > +++ b/kernel/sched/fair.c
> > > > @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
> > > > imbalance /= ncores_local + ncores_busiest;
> > > >
> > > > /* Take advantage of resource in an empty sched group */
> > > > - if (imbalance == 0 && local->sum_nr_running == 0 &&
> > > > + if (imbalance <= 1 && local->sum_nr_running == 0 &&
> > > > busiest->sum_nr_running > 1)
> > > > imbalance = 2;
> > > >
> > > > @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > > > break;
> > > >
> > > > case group_smt_balance:
> > > > + /* no idle cpus on both groups handled by group_fully_busy below */
> > > > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > > > + if (sgs->idle_cpus > busiest->idle_cpus)
> > > > + return false;
> > > > + if (sgs->idle_cpus < busiest->idle_cpus)
> > > > + return true;
> > > > + if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > > > + return false;
> > > > + else
> > > > + return true;
> > > > + }
> > > > + goto fully_busy;
> > > > + break;
> > > > +
> > > > case group_fully_busy:
> > > > /*
> > > > * Select the fully busy group with highest avg_load. In
> > > > @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > > > * select the 1st one, except if @sg is composed of SMT
> > > > * siblings.
> > > > */
> > > > -
> > > > +fully_busy:
> > > > if (sgs->avg_load < busiest->avg_load)
> > > > return false;
> > > >
> > >
> > > Hi Tim, Peter.
> > >
> > > group_smt_balance(cluster scheduling), patches are in tip/sched/core. I dont
> > > see this above patch there yet. Currently as is, this can cause function difference
> > > in SMT4 systems( such as Power10).
> > >
> > > Can we please have the above patch as well in tip/sched/core?
> > >
> > > Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
> >
> > Hi Peter,
> >
> > Just back from my long vacation. Wonder if you have any comments on the above patch
> > for fixing the SMT4 case?
> >
> > Tim
>
> Hi Tim, Peter.
>
> are there any concerns with the above patch for fixing the SMT4 case.
> Currently the behavior is group_smt_balance is set for having even 2 tasks in
> SMT4, ideally it should be same as the group_has_spare.
>
> The above patch copies the same behavior to group_smt_balance.
> >
Do you mean simplifying the patch as below? I think that should be fine. Can you
make sure it works for SMT4? I can then update the patch once you confirm it
works properly.
Tim
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6e7ee2efc1ba..48e9ab7f8a87 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9764,16 +9764,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
case group_smt_balance:
/* no idle cpus on both groups handled by group_fully_busy below */
- if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
- if (sgs->idle_cpus > busiest->idle_cpus)
- return false;
- if (sgs->idle_cpus < busiest->idle_cpus)
- return true;
- if (sgs->sum_nr_running <= busiest->sum_nr_running)
- return false;
- else
- return true;
- }
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
+ goto has_spare;
+
goto fully_busy;
case group_fully_busy:
@@ -9809,6 +9802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* as we do not want to pull task off SMT core with one task
* and make the core idle.
*/
+has_spare:
if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
return false;
On 9/6/23 12:07 AM, Tim Chen wrote:
> On Tue, 2023-09-05 at 13:33 +0530, Shrikanth Hegde wrote:
>>
>> On 8/22/23 12:49 AM, Tim Chen wrote:
>>> On Mon, 2023-08-07 at 15:06 +0530, Shrikanth Hegde wrote:
>>>>>
>>>>> From: Tim Chen <tim.c.chen@linux.intel.com>
>>>>> Date: Fri, 14 Jul 2023 16:09:30 -0700
>>>>> Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
>>>>>
>>>>> For SMT4, any group with more than 2 tasks will be marked as
>>>>> group_smt_balance. Retain the behaviour of group_has_spare by marking
>>>>> the busiest group as the group which has the least number of idle_cpus.
>>>>>
>>>>> Also, handle rounding effect of adding (ncores_local + ncores_busy)
>>>>> when the local is fully idle and busy group has more than 2 tasks.
>>>>> Local group should try to pull at least 1 task in this case.
>>>>>
>>>>> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
>>>>> ---
>>>>> kernel/sched/fair.c | 18 ++++++++++++++++--
>>>>> 1 file changed, 16 insertions(+), 2 deletions(-)
>>>>>
>>>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>>>> index a87988327f88..566686c5f2bd 100644
>>>>> --- a/kernel/sched/fair.c
>>>>> +++ b/kernel/sched/fair.c
>>>>> @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
>>>>> imbalance /= ncores_local + ncores_busiest;
>>>>>
>>>>> /* Take advantage of resource in an empty sched group */
>>>>> - if (imbalance == 0 && local->sum_nr_running == 0 &&
>>>>> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
>>>>> busiest->sum_nr_running > 1)
>>>>> imbalance = 2;
>>>>>
>>>>> @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>>>> break;
>>>>>
>>>>> case group_smt_balance:
>>>>> + /* no idle cpus on both groups handled by group_fully_busy below */
>>>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>>>> + return false;
>>>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>>>> + return true;
>>>>> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
>>>>> + return false;
>>>>> + else
>>>>> + return true;
>>>>> + }
>>>>> + goto fully_busy;
>>>>> + break;
>>>>> +
>>>>> case group_fully_busy:
>>>>> /*
>>>>> * Select the fully busy group with highest avg_load. In
>>>>> @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>>>> * select the 1st one, except if @sg is composed of SMT
>>>>> * siblings.
>>>>> */
>>>>> -
>>>>> +fully_busy:
>>>>> if (sgs->avg_load < busiest->avg_load)
>>>>> return false;
>>>>>
>>>>
>>>> Hi Tim, Peter.
>>>>
>>>> group_smt_balance(cluster scheduling), patches are in tip/sched/core. I dont
>>>> see this above patch there yet. Currently as is, this can cause function difference
>>>> in SMT4 systems( such as Power10).
>>>>
>>>> Can we please have the above patch as well in tip/sched/core?
>>>>
>>>> Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
>>>
>>> Hi Peter,
>>>
>>> Just back from my long vacation. Wonder if you have any comments on the above patch
>>> for fixing the SMT4 case?
>>>
>>> Tim
>>
>> Hi Tim, Peter.
>>
>> are there any concerns with the above patch for fixing the SMT4 case.
>> Currently the behavior is group_smt_balance is set for having even 2 tasks in
>> SMT4, ideally it should be same as the group_has_spare.
>>
>> The above patch copies the same behavior to group_smt_balance.
>>>
>
> You mean simplify the patch as below? I think that should be fine. Can you
> make sure it works for SMT4? And I can update the patch once you confirm it
> works properly.
>
> Tim
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 6e7ee2efc1ba..48e9ab7f8a87 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9764,16 +9764,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>
> case group_smt_balance:
> /* no idle cpus on both groups handled by group_fully_busy below */
> - if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> - if (sgs->idle_cpus > busiest->idle_cpus)
> - return false;
> - if (sgs->idle_cpus < busiest->idle_cpus)
> - return true;
> - if (sgs->sum_nr_running <= busiest->sum_nr_running)
> - return false;
> - else
> - return true;
> - }
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
> + goto has_spare;
> +
> goto fully_busy;
>
> case group_fully_busy:
> @@ -9809,6 +9802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> * as we do not want to pull task off SMT core with one task
> * and make the core idle.
> */
> +has_spare:
> if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> return false;
>
>
>
Hi Tim,
In case you were waiting for my reply, as inferred from the other email:
the above change looks fine as well, and it avoids duplicating the
code for group_smt_balance.
Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
On Thu, 2023-09-07 at 14:28 +0530, Shrikanth Hegde wrote:
> >
> > You mean simplify the patch as below? I think that should be fine. Can you
> > make sure it works for SMT4? And I can update the patch once you confirm it
> > works properly.
> >
> > Tim
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 6e7ee2efc1ba..48e9ab7f8a87 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9764,16 +9764,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> >
> > case group_smt_balance:
> > /* no idle cpus on both groups handled by group_fully_busy below */
> > - if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > - if (sgs->idle_cpus > busiest->idle_cpus)
> > - return false;
> > - if (sgs->idle_cpus < busiest->idle_cpus)
> > - return true;
> > - if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > - return false;
> > - else
> > - return true;
> > - }
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
> > + goto has_spare;
> > +
> > goto fully_busy;
> >
> > case group_fully_busy:
> > @@ -9809,6 +9802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > * as we do not want to pull task off SMT core with one task
> > * and make the core idle.
> > */
> > +has_spare:
> > if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> > if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> > return false;
> >
> >
> >
>
> Hi Tim,
>
> In case you were waiting for my reply as inferred from other email.
> The above change looks fine as well. This would avoid duplication of
> code for group_smt_balance.
>
> Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Peter,
Here's the updated patch. Please consider it for inclusion.
Thanks.
Tim
From 979e261fed6e3765316a4de794f595f93c02cef0 Mon Sep 17 00:00:00 2001
From: Tim Chen <tim.c.chen@linux.intel.com>
Subject: [PATCH] sched/fair: Fix SMT4 group_smt_balance handling
To: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>, Vincent Guittot <vincent.guittot@linaro.org>, Ricardo Neri <ricardo.neri@intel.com>, Ravi V. Shankar <ravi.v.shankar@intel.com>, Ben Segall
<bsegall@google.com>, Daniel Bristot de Oliveira <bristot@redhat.com>, Dietmar Eggemann <dietmar.eggemann@arm.com>, Len Brown <len.brown@intel.com>, Mel Gorman <mgorman@suse.de>, Rafael J. Wysocki
<rafael.j.wysocki@intel.com>, Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>, Steven Rostedt <rostedt@goodmis.org>, Tim Chen <tim.c.chen@linux.intel.com>, Valentin Schneider
<vschneid@redhat.com>, Ionela Voinescu <ionela.voinescu@arm.com>, x86@kernel.org, linux-kernel@vger.kernel.org, Shrikanth Hegde <sshegde@linux.vnet.ibm.com>, Srikar Dronamraju
<srikar@linux.vnet.ibm.com>, naveen.n.rao@linux.vnet.ibm.com, Yicong Yang <yangyicong@hisilicon.com>, Barry Song <v-songbaohua@oppo.com>, Chen Yu <yu.c.chen@intel.com>, Hillf Danton <hdanton@sina.com>
For SMT4, any group with more than 2 tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy) when
the local is fully idle and busy group imbalance is less than 2 tasks.
Local group should try to pull at least 1 task in this case so imbalance
should be set to 2 instead.
Fixes: fee1759e4f04 ("sched/fair: Determine active load balance for SMT sched groups")
Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
kernel/sched/fair.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b7445cd5af9..fd9e594b5623 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9575,7 +9575,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9763,6 +9763,15 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /*
+ * Check if we have spare CPUs on either SMT group to
+ * choose has spare or fully busy handling.
+ */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
+ goto has_spare;
+
+ fallthrough;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9802,6 +9811,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
else
return true;
}
+has_spare:
/*
* Select not overloaded group with lowest number of idle cpus
--
2.32.0
The following commit has been merged into the sched/urgent branch of tip:
Commit-ID: 450e749707bc1755f22b505d9cd942d4869dc535
Gitweb: https://git.kernel.org/tip/450e749707bc1755f22b505d9cd942d4869dc535
Author: Tim Chen <tim.c.chen@linux.intel.com>
AuthorDate: Thu, 07 Sep 2023 10:42:21 -07:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 13 Sep 2023 15:03:06 +02:00
sched/fair: Fix SMT4 group_smt_balance handling
For SMT4, any group with more than 2 tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy) when
the local is fully idle and busy group imbalance is less than 2 tasks.
Local group should try to pull at least 1 task in this case so imbalance
should be set to 2 instead.
Fixes: fee1759e4f04 ("sched/fair: Determine active load balance for SMT sched groups")
Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: http://lkml.kernel.org/r/6cd1633036bb6b651af575c32c2a9608a106702c.camel@linux.intel.com
---
kernel/sched/fair.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 33a2b6b..cb22592 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9580,7 +9580,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9768,6 +9768,15 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /*
+ * Check if we have spare CPUs on either SMT group to
+ * choose has spare or fully busy handling.
+ */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
+ goto has_spare;
+
+ fallthrough;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9807,6 +9816,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
else
return true;
}
+has_spare:
/*
* Select not overloaded group with lowest number of idle cpus
The following commit has been merged into the sched/urgent branch of tip:
Commit-ID: ad468232c3eb1dab163672f98a1ab2363be7981e
Gitweb: https://git.kernel.org/tip/ad468232c3eb1dab163672f98a1ab2363be7981e
Author: Tim Chen <tim.c.chen@linux.intel.com>
AuthorDate: Thu, 07 Sep 2023 10:42:21 -07:00
Committer: root <root@noisy.programming.kicks-ass.net>
CommitterDate: Sat, 09 Sep 2023 15:10:10 +02:00
sched/fair: Fix SMT4 group_smt_balance handling
For SMT4, any group with more than 2 tasks will be marked as
group_smt_balance. Retain the behaviour of group_has_spare by marking
the busiest group as the group which has the least number of idle_cpus.
Also, handle rounding effect of adding (ncores_local + ncores_busy) when
the local is fully idle and busy group imbalance is less than 2 tasks.
Local group should try to pull at least 1 task in this case so imbalance
should be set to 2 instead.
Fixes: fee1759e4f04 ("sched/fair: Determine active load balance for SMT sched groups")
Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/6cd1633036bb6b651af575c32c2a9608a106702c.camel@linux.intel.com
---
kernel/sched/fair.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 33a2b6b..cb22592 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9580,7 +9580,7 @@ static inline long sibling_imbalance(struct lb_env *env,
imbalance /= ncores_local + ncores_busiest;
/* Take advantage of resource in an empty sched group */
- if (imbalance == 0 && local->sum_nr_running == 0 &&
+ if (imbalance <= 1 && local->sum_nr_running == 0 &&
busiest->sum_nr_running > 1)
imbalance = 2;
@@ -9768,6 +9768,15 @@ static bool update_sd_pick_busiest(struct lb_env *env,
break;
case group_smt_balance:
+ /*
+ * Check if we have spare CPUs on either SMT group to
+ * choose has spare or fully busy handling.
+ */
+ if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
+ goto has_spare;
+
+ fallthrough;
+
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9807,6 +9816,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
else
return true;
}
+has_spare:
/*
* Select not overloaded group with lowest number of idle cpus
On 9/6/23 12:07 AM, Tim Chen wrote:
> On Tue, 2023-09-05 at 13:33 +0530, Shrikanth Hegde wrote:
>>
>> On 8/22/23 12:49 AM, Tim Chen wrote:
>>> On Mon, 2023-08-07 at 15:06 +0530, Shrikanth Hegde wrote:
>>>>>
>>>>> From: Tim Chen <tim.c.chen@linux.intel.com>
>>>>> Date: Fri, 14 Jul 2023 16:09:30 -0700
>>>>> Subject: [PATCH] sched/fair: Add SMT4 group_smt_balance handling
>>>>>
>>>>> For SMT4, any group with more than 2 tasks will be marked as
>>>>> group_smt_balance. Retain the behaviour of group_has_spare by marking
>>>>> the busiest group as the group which has the least number of idle_cpus.
>>>>>
>>>>> Also, handle rounding effect of adding (ncores_local + ncores_busy)
>>>>> when the local is fully idle and busy group has more than 2 tasks.
>>>>> Local group should try to pull at least 1 task in this case.
>>>>>
>>>>> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
>>>>> ---
>>>>> kernel/sched/fair.c | 18 ++++++++++++++++--
>>>>> 1 file changed, 16 insertions(+), 2 deletions(-)
>>>>>
>>>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>>>> index a87988327f88..566686c5f2bd 100644
>>>>> --- a/kernel/sched/fair.c
>>>>> +++ b/kernel/sched/fair.c
>>>>> @@ -9563,7 +9563,7 @@ static inline long sibling_imbalance(struct lb_env *env,
>>>>> imbalance /= ncores_local + ncores_busiest;
>>>>>
>>>>> /* Take advantage of resource in an empty sched group */
>>>>> - if (imbalance == 0 && local->sum_nr_running == 0 &&
>>>>> + if (imbalance <= 1 && local->sum_nr_running == 0 &&
>>>>> busiest->sum_nr_running > 1)
>>>>> imbalance = 2;
>>>>>
>>>>> @@ -9751,6 +9751,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>>>> break;
>>>>>
>>>>> case group_smt_balance:
>>>>> + /* no idle cpus on both groups handled by group_fully_busy below */
>>>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>>>> + return false;
>>>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>>>> + return true;
>>>>> + if (sgs->sum_nr_running <= busiest->sum_nr_running)
>>>>> + return false;
>>>>> + else
>>>>> + return true;
>>>>> + }
>>>>> + goto fully_busy;
>>>>> + break;
>>>>> +
>>>>> case group_fully_busy:
>>>>> /*
>>>>> * Select the fully busy group with highest avg_load. In
>>>>> @@ -9763,7 +9777,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>>>>> * select the 1st one, except if @sg is composed of SMT
>>>>> * siblings.
>>>>> */
>>>>> -
>>>>> +fully_busy:
>>>>> if (sgs->avg_load < busiest->avg_load)
>>>>> return false;
>>>>>
>>>>
>>>> Hi Tim, Peter.
>>>>
>>>> group_smt_balance(cluster scheduling), patches are in tip/sched/core. I dont
>>>> see this above patch there yet. Currently as is, this can cause function difference
>>>> in SMT4 systems( such as Power10).
>>>>
>>>> Can we please have the above patch as well in tip/sched/core?
>>>>
>>>> Acked-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
>>>
>>> Hi Peter,
>>>
>>> Just back from my long vacation. Wonder if you have any comments on the above patch
>>> for fixing the SMT4 case?
>>>
>>> Tim
>>
>> Hi Tim, Peter.
>>
>> are there any concerns with the above patch for fixing the SMT4 case.
>> Currently the behavior is group_smt_balance is set for having even 2 tasks in
>> SMT4, ideally it should be same as the group_has_spare.
>>
>> The above patch copies the same behavior to group_smt_balance.
>>>
>
> You mean simplify the patch as below? I think that should be fine. Can you
> make sure it works for SMT4? And I can update the patch once you confirm it
> works properly.
>
This looks fine, and is likely better as it avoids duplication. A few nits below.
> Tim
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 6e7ee2efc1ba..48e9ab7f8a87 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9764,16 +9764,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>
> case group_smt_balance:
> /* no idle cpus on both groups handled by group_fully_busy below */
Please add a comment here explaining the fall-through and spare logic.
> - if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> - if (sgs->idle_cpus > busiest->idle_cpus)
> - return false;
> - if (sgs->idle_cpus < busiest->idle_cpus)
> - return true;
> - if (sgs->sum_nr_running <= busiest->sum_nr_running)
> - return false;
> - else
> - return true;
> - }
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
> + goto has_spare;
> +
> goto fully_busy;
This can fall through without the additional goto statement, no?
>
> case group_fully_busy:
> @@ -9809,6 +9802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> * as we do not want to pull task off SMT core with one task
> * and make the core idle.
> */
> +has_spare:
> if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> return false;
>
>
>
On Wed, 2023-09-06 at 14:59 +0530, Shrikanth Hegde wrote:
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 6e7ee2efc1ba..48e9ab7f8a87 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9764,16 +9764,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> >
> > case group_smt_balance:
> > /* no idle cpus on both groups handled by group_fully_busy below */
>
> Please add a comment here explaining the fall-through and spare logic.
>
Sure.
> > - if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> > - if (sgs->idle_cpus > busiest->idle_cpus)
> > - return false;
> > - if (sgs->idle_cpus < busiest->idle_cpus)
> > - return true;
> > - if (sgs->sum_nr_running <= busiest->sum_nr_running)
> > - return false;
> > - else
> > - return true;
> > - }
> > + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0)
> > + goto has_spare;
> > +
> > goto fully_busy;
>
> This can fall through without the additional goto statement no?
>
There is an unconditional goto fully_busy, so it won't fall through and the
compiler won't complain.
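For illustration only, here is a tiny user-space sketch of that control-flow shape
(hypothetical enum values and stand-in comparisons, not the scheduler code): because
the smt_balance case ends in an unconditional goto, there is no implicit fall-through
into the next case and hence no warning.

#include <stdbool.h>

enum group_type { group_smt_balance, group_fully_busy };

bool pick(enum group_type type, unsigned int sg_idle, unsigned int busiest_idle)
{
	switch (type) {
	case group_smt_balance:
		if (sg_idle != 0 || busiest_idle != 0)
			goto has_spare;
		goto fully_busy;	/* unconditional: no fall-through */
	case group_fully_busy:
fully_busy:
		/* stand-in for the avg_load comparison */
		return sg_idle < busiest_idle;
	}
has_spare:
	/* stand-in for the has-spare comparison */
	return sg_idle < busiest_idle;
}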
> >
> > case group_fully_busy:
> > @@ -9809,6 +9802,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > * as we do not want to pull task off SMT core with one task
> > * and make the core idle.
> > */
> > +has_spare:
> > if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> > if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> > return false;
> >
> >
> >
Tim
On Tue, Sep 05, 2023 at 01:33:57PM +0530, Shrikanth Hegde wrote:
> Hi Tim, Peter.
Back from PTO; mailbox is a disaster area, but I'll try and have a look soon.
On Mon, Aug 21, 2023 at 12:19:40PM -0700, Tim Chen wrote:
> Just back from my long vacation. Wonder if you have any comments on the above patch
> for fixing the SMT4 case?
This should have:
Fixes: fee1759e4f04 ("sched/fair: Determine active load balance for SMT sched groups")
Right?
On 2023-07-15 01:05, Tim Chen wrote:
> On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>
>>
>>
>> If we consider symmetric platforms which have SMT4 such as power10.
>> we have a topology like below. multiple such MC will form DIE(PKG)
>>
>>
>> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
>> [--SMT--][--SMT--][----SMT---][---SMT----]
>> [--sg1--][--sg1--][---sg1----][---sg1----]
>> [--------------MC------------------------]
>>
>> In case of SMT4, if there is any group which has 2 or more tasks, that
>> group will be marked as group_smt_balance. previously, if that group
>> had 2
>> or 3 tasks, it would have been marked as group_has_spare. Since all
>> the groups have
>> SMT that means behavior would be same fully busy right? That can cause
>> some
>> corner cases. No?
>
> You raised a good point. I was looking from SMT2
> perspective so group_smt_balance implies group_fully_busy.
> That is no longer true for SMT4.
>
> I am thinking of the following fix on the current patch
> to take care of SMT4. Do you think this addresses
> concerns from you and Tobias?
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 294a662c9410..3fc8d3a3bd22 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct lb_env
> *env,
> break;
>
> case group_smt_balance:
> + /* no idle cpus on both groups handled by
> group_fully_busy below */
> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
> + if (sgs->idle_cpus > busiest->idle_cpus)
> + return false;
> + if (sgs->idle_cpus < busiest->idle_cpus)
> + return true;
> + if (sgs->sum_nr_running <=
> busiest_sum_nr_running)
> + return false;
> + else
> + return true;
> + }
>
>
> I will be on vacation next three weeks so my response will be slow.
>
> Tim
>
What if the setup is asymmetric, where SMT2 and SMT4 mix, e.g.
[0 1][2 3 4 5]
[SMT][--SMT--]
If CPUs 0, 2 and 3 each have a running task, both groups would be classified as
smt_balance. But when it comes to selecting the busiest group, the smaller
group would be selected, as it has fewer idle CPUs, right? Which could lead
to the smaller group being left with no tasks (a worked version of this
example is sketched below).
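To put numbers on that (illustrative only, not kernel code): the small group has
2 - 1 = 1 idle CPU and the large group has 4 - 2 = 2, so a pure "fewer idle CPUs
is busier" rule picks the small group even though it only has a single task to
give away.

#include <stdio.h>

int main(void)
{
	unsigned int small_idle = 2 - 1;	/* [0 1]: CPU 0 busy       */
	unsigned int big_idle   = 4 - 2;	/* [2 3 4 5]: CPUs 2, 3 busy */

	printf("busiest: %s group\n",
	       small_idle < big_idle ? "smaller" : "bigger");
	return 0;
}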
Using absolute task counts is what made the prefer_sibling path problematic,
and I would assume the same holds true here. Therefore, I would prefer avg_load
or, similar to prefer_sibling, a ratio over the number of cores.
I can't really test that on s390 as we always have SMT2. But we can have these
asymmetries at higher levels, e.g.
[0 1][2 3][4 5][6 7][8 9]
[SMT][SMT][SMT][SMT][SMT]
[-----core----][--core--]
For large configurations this can be true at even higher levels.
Therefore, the idea was to move the smt_balance state around and adapt its
conditions to something like this (which would require reordering the commits):
@@ -8330,6 +8330,11 @@ enum fbq_type { regular, remote, all };
 enum group_type {
	/* The group has spare capacity that can be used to run more tasks. */
	group_has_spare = 0,
+	/*
+	 * Balance SMT group that's fully busy. Can benefit from migration
+	 * a task on SMT with busy sibling to another CPU on idle core.
+	 */
+	group_smt_balance,
	/*
	 * The group is fully used and the tasks don't compete for more CPU
	 * cycles. Nevertheless, some tasks might wait before running.
@@ -8340,11 +8345,6 @@ enum group_type {
	 * more powerful CPU.
	 */
	group_misfit_task,
-	/*
-	 * Balance SMT group that's fully busy. Can benefit from migration
-	 * a task on SMT with busy sibling to another CPU on idle core.
-	 */
-	group_smt_balance,
	/*
	 * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
	 * and the task should be migrated to it instead of running on the
@@ -9327,15 +9327,15 @@ group_type group_classify(unsigned int imbalance_pct,
	if (sgs->group_asym_packing)
		return group_asym_packing;
-	if (sgs->group_smt_balance)
-		return group_smt_balance;
-
	if (sgs->group_misfit_task_load)
		return group_misfit_task;
	if (!group_has_capacity(imbalance_pct, sgs))
		return group_fully_busy;
+	if (sgs->group_smt_balance)
+		return group_smt_balance;
+
	return group_has_spare;
 }
@@ -9457,8 +9457,7 @@ static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
	 * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
	 * will not be on.
	 */
-	if (group->flags & SD_SHARE_CPUCAPACITY &&
-	    sgs->sum_h_nr_running > 1)
+	if (sgs->sum_h_nr_running > group->cores)
		return true;
	return false;
The s390 problem is currently solved by changing the prefer_sibling path.
When disabling that flag, we might have an issue; I will have to verify that though.
>>
>> One example is Lets say sg1 has 4 tasks. and sg2 has 0 tasks and is
>> trying to do
>> load balance. Previously imbalance would have been 2, instead now
>> imbalance would be 1.
>> But in subsequent lb it would be balanced.
>>
>>
>>
>> > + return false;
>> > +}
>> > +
>> > static inline bool
>> > sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
>> > {
>> > @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>> > sgs->group_asym_packing = 1;
>> > }
>> >
>> > + /* Check for loaded SMT group to be balanced to dst CPU */
>> > + if (!local_group && smt_balance(env, sgs, group))
>> > + sgs->group_smt_balance = 1;
>> > +
>> > sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
>> >
>> > /* Computing avg_load makes sense only when group is overloaded */
>> > @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>> > return false;
>> > break;
>> >
>> > + case group_smt_balance:
>> > case group_fully_busy:
>> > /*
>> > * Select the fully busy group with highest avg_load. In
>> > @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
>> > break;
>> >
>> > case group_has_spare:
>> > + /*
>> > + * Do not pick sg with SMT CPUs over sg with pure CPUs,
>> > + * as we do not want to pull task off SMT core with one task
>> > + * and make the core idle.
>> > + */
>> > + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
>> > + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
>> > + return false;
>> > + else
>> > + return true;> + }
>> > +
>> > /*
>> > * Select not overloaded group with lowest number of idle cpus
>> > * and highest number of running tasks. We could also compare
>> > @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
>> >
>> > case group_imbalanced:
>> > case group_asym_packing:
>> > + case group_smt_balance:
>> > /* Those types are not used in the slow wakeup path */
>> > return false;
>> >
>> > @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
>> >
>> > case group_imbalanced:
>> > case group_asym_packing:
>> > + case group_smt_balance:
>> > /* Those type are not used in the slow wakeup path */
>> > return NULL;
>> >
>> > @@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
>> > return;
>> > }
>> >
>> > + if (busiest->group_type == group_smt_balance) {
>> > + /* Reduce number of tasks sharing CPU capacity */
>> > + env->migration_type = migrate_task;
>> > + env->imbalance = 1;
>> > + return;
>> > + }
>> > +
>> > if (busiest->group_type == group_imbalanced) {
>> > /*
>> > * In the group_imb case we cannot rely on group-wide averages
>> > @@ -10363,16 +10428,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
>> > goto force_balance;
>> >
>> > if (busiest->group_type != group_overloaded) {
>> > - if (env->idle == CPU_NOT_IDLE)
>> > + if (env->idle == CPU_NOT_IDLE) {
>> > /*
>> > * If the busiest group is not overloaded (and as a
>> > * result the local one too) but this CPU is already
>> > * busy, let another idle CPU try to pull task.
>> > */
>> > goto out_balanced;
>> > + }
>> > +
>> > + if (busiest->group_type == group_smt_balance &&
>> > + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
>> > + /* Let non SMT CPU pull from SMT CPU sharing with sibling */
>> > + goto force_balance;
>> > + }
>> >
>> > if (busiest->group_weight > 1 &&
>> > - local->idle_cpus <= (busiest->idle_cpus + 1))
>> > + local->idle_cpus <= (busiest->idle_cpus + 1)) {
>> > /*
>> > * If the busiest group is not overloaded
>> > * and there is no imbalance between this and busiest
>> > @@ -10383,12 +10455,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
>> > * there is more than 1 CPU per group.
>> > */
>> > goto out_balanced;
>> > + }
>> >
>> > - if (busiest->sum_h_nr_running == 1)
>> > + if (busiest->sum_h_nr_running == 1) {
>> > /*
>> > * busiest doesn't have any tasks waiting to run
>> > */
>> > goto out_balanced;
>> > + }
>> > }
>> >
>> > force_balance:
On 7/18/23 11:37 AM, Tobias Huschle wrote:
> On 2023-07-15 01:05, Tim Chen wrote:
>> On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>>
>>>
>>>
>>> If we consider symmetric platforms which have SMT4 such as power10.
>>> we have a topology like below. multiple such MC will form DIE(PKG)
>>>
>>>
>>> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
>>> [--SMT--][--SMT--][----SMT---][---SMT----]
>>> [--sg1--][--sg1--][---sg1----][---sg1----]
>>> [--------------MC------------------------]
>>>
>>> In case of SMT4, if there is any group which has 2 or more tasks, that
>>> group will be marked as group_smt_balance. previously, if that group
>>> had 2
>>> or 3 tasks, it would have been marked as group_has_spare. Since all
>>> the groups have
>>> SMT that means behavior would be same fully busy right? That can
>>> cause some
>>> corner cases. No?
>>
>> You raised a good point. I was looking from SMT2
>> perspective so group_smt_balance implies group_fully_busy.
>> That is no longer true for SMT4.
>>
>> I am thinking of the following fix on the current patch
>> to take care of SMT4. Do you think this addresses
>> concerns from you and Tobias?
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 294a662c9410..3fc8d3a3bd22 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct
>> lb_env *env,
>> break;
>>
>> case group_smt_balance:
>> + /* no idle cpus on both groups handled by
>> group_fully_busy below */
>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>> + if (sgs->idle_cpus > busiest->idle_cpus)
>> + return false;
>> + if (sgs->idle_cpus < busiest->idle_cpus)
>> + return true;
>> + if (sgs->sum_nr_running <=
>> busiest_sum_nr_running)
>> + return false;
>> + else
>> + return true;
>> + }
>>
>>
>> I will be on vacation next three weeks so my response will be slow.
>>
>> Tim
>>
>
> What if the setup is asymmetric, where SMT2 and SMT4 would mix, e.g.
>
> [0 1][2 3 4 5]
> [SMT][--SMT--]
>
> If now CPUs 0,2,3 have a running task, both groups would be classified as
> smt_balance. But if it comes to the selection of the busiest group, the
> smaller
> group would be selected, as it has less idle CPUs, right? Which could lead
> to the smaller group being left with no tasks.
> Using the absolute numbers of task is what made the prefer_sibling path
> problematic,
Yes. But I'm not sure how realistic that configuration is. On power10, we typically
have all cores in either SMT1, SMT2 or SMT4, not mixed configs.
One can offline CPUs to get into that case with SMT4.
> I would assume that the same holds true here. Therefore, I would prefer
> avg_load,
> or, similar to prefer_siblings, a ratio over the number of cores.
>
> I can't really test that on s390 as we always have SMT2. But, we can
> have these
> asymmetries on higher levels, e.g
IIUC, at higher levels the group will not have SD_SHARE_CPUCAPACITY, so it shouldn't
run into group_smt_balance.
>
> [0 1][2 3][4 5][6 7][8 9]
> [SMT][SMT][SMT][SMT][SMT]
> [-----core----][--core--]
>
> For large configurations this can be true for even higher levels.
> Therefore, the idea was to move the smt_balance state around and adapt its
> conditions to something like this (which would require to reorder the
> commits):
>
> @@ -8330,6 +8330,11 @@ enum fbq_type { regular, remote, all };
> enum group_type {
> /* The group has spare capacity that can be used to run more
> tasks. */
> group_has_spare = 0,
> + /*
> + * Balance SMT group that's fully busy. Can benefit from migration
> + * a task on SMT with busy sibling to another CPU on idle core.
> + */
> + group_smt_balance,
> /*
> * The group is fully used and the tasks don't compete for more CPU
> * cycles. Nevertheless, some tasks might wait before running.
> @@ -8340,11 +8345,6 @@ enum group_type {
> * more powerful CPU.
> */
> group_misfit_task,
> - /*
> - * Balance SMT group that's fully busy. Can benefit from migration
> - * a task on SMT with busy sibling to another CPU on idle core.
> - */
> - group_smt_balance,
> /*
> * SD_ASYM_PACKING only: One local CPU with higher capacity is
> available,
IIUC, for the cluster topology of this patch, the busiest group should be the SMT core if it has 2
busy threads, compared to an Atom cluster with 4 busy threads. The Atom cluster will be group_fully_busy,
whereas the SMT group will be group_smt_balance. For that to happen, group_smt_balance must be the
higher group_type (see the enum sketch below).
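A trimmed sketch of the ordering being relied on (members elided and paraphrased, not
the full enum from fair.c): group_classify() returns the highest state that applies,
and update_sd_pick_busiest() treats a numerically larger group_type as busier, so
group_smt_balance has to sit above group_fully_busy.

enum group_type {
	group_has_spare = 0,
	group_fully_busy,
	group_misfit_task,
	group_smt_balance,	/* must rank above group_fully_busy */
	/* ... busier states (asym packing, imbalanced, overloaded) ... */
};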
> * and the task should be migrated to it instead of running on the
> @@ -9327,15 +9327,15 @@ group_type group_classify(unsigned int
> imbalance_pct,
> if (sgs->group_asym_packing)
> return group_asym_packing;
>
> - if (sgs->group_smt_balance)
> - return group_smt_balance;
> -
> if (sgs->group_misfit_task_load)
> return group_misfit_task;
>
> if (!group_has_capacity(imbalance_pct, sgs))
> return group_fully_busy;
>
> + if (sgs->group_smt_balance)
> + return group_smt_balance;
> +
> return group_has_spare;
> }
>
> @@ -9457,8 +9457,7 @@ static inline bool smt_balance(struct lb_env *env,
> struct sg_lb_stats *sgs,
> * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
> * will not be on.
> */
> - if (group->flags & SD_SHARE_CPUCAPACITY &&
> - sgs->sum_h_nr_running > 1)
> + if (sgs->sum_h_nr_running > group->cores)
In the case of Power10, where we have SMT4, group->cores will be 1, so I don't see
a difference here.
> return true;
>
> return false;
>
> The s390 problem is currently solved by changing the prefer_sibling
> path. When
> disabling that flag, we might have an issue, will have to verify that
> though.
>
>>>
>>> One example is Lets say sg1 has 4 tasks. and sg2 has 0 tasks and is
>>> trying to do
>>> load balance. Previously imbalance would have been 2, instead now
>>> imbalance would be 1.
>>> But in subsequent lb it would be balanced.
>>>
>>>
>>>
>>> > + return false;
>>> > +}
>>> > +
>>> > static inline bool
>>> > sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
>>> > {
>>> > @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct
>>> lb_env *env,
>>> > sgs->group_asym_packing = 1;
>>> > }
>>> >
>>> > + /* Check for loaded SMT group to be balanced to dst CPU */
>>> > + if (!local_group && smt_balance(env, sgs, group))
>>> > + sgs->group_smt_balance = 1;
>>> > +
>>> > sgs->group_type = group_classify(env->sd->imbalance_pct,
>>> group, sgs);
>>> >
>>> > /* Computing avg_load makes sense only when group is
>>> overloaded */
>>> > @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct
>>> lb_env *env,
>>> > return false;
>>> > break;
>>> >
>>> > + case group_smt_balance:
>>> > case group_fully_busy:
>>> > /*
>>> > * Select the fully busy group with highest avg_load. In
>>> > @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct
>>> lb_env *env,
>>> > break;
>>> >
>>> > case group_has_spare:
>>> > + /*
>>> > + * Do not pick sg with SMT CPUs over sg with pure CPUs,
>>> > + * as we do not want to pull task off SMT core with one task
>>> > + * and make the core idle.
>>> > + */
>>> > + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
>>> > + if (sg->flags & SD_SHARE_CPUCAPACITY &&
>>> sgs->sum_h_nr_running <= 1)
>>> > + return false;
>>> > + else
>>> > + return true;> + }
>>> > +
>>> > /*
>>> > * Select not overloaded group with lowest number of idle
>>> cpus
>>> > * and highest number of running tasks. We could also compare
>>> > @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct
>>> sched_group *idlest,
>>> >
>>> > case group_imbalanced:
>>> > case group_asym_packing:
>>> > + case group_smt_balance:
>>> > /* Those types are not used in the slow wakeup path */
>>> > return false;
>>> >
>>> > @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd,
>>> struct task_struct *p, int this_cpu)
>>> >
>>> > case group_imbalanced:
>>> > case group_asym_packing:
>>> > + case group_smt_balance:
>>> > /* Those type are not used in the slow wakeup path */
>>> > return NULL;
>>> >
>>> > @@ -10118,6 +10176,13 @@ static inline void
>>> calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
>>> > return;
>>> > }
>>> >
>>> > + if (busiest->group_type == group_smt_balance) {
>>> > + /* Reduce number of tasks sharing CPU capacity */
>>> > + env->migration_type = migrate_task;
>>> > + env->imbalance = 1;
>>> > + return;
>>> > + }
>>> > +
>>> > if (busiest->group_type == group_imbalanced) {
>>> > /*
>>> > * In the group_imb case we cannot rely on group-wide
>>> averages
>>> > @@ -10363,16 +10428,23 @@ static struct sched_group
>>> *find_busiest_group(struct lb_env *env)
>>> > goto force_balance;
>>> >
>>> > if (busiest->group_type != group_overloaded) {
>>> > - if (env->idle == CPU_NOT_IDLE)
>>> > + if (env->idle == CPU_NOT_IDLE) {
>>> > /*
>>> > * If the busiest group is not overloaded (and as a
>>> > * result the local one too) but this CPU is already
>>> > * busy, let another idle CPU try to pull task.
>>> > */
>>> > goto out_balanced;
>>> > + }
>>> > +
>>> > + if (busiest->group_type == group_smt_balance &&
>>> > + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
>>> > + /* Let non SMT CPU pull from SMT CPU sharing with
>>> sibling */
>>> > + goto force_balance;
>>> > + }
>>> >
>>> > if (busiest->group_weight > 1 &&
>>> > - local->idle_cpus <= (busiest->idle_cpus + 1))
>>> > + local->idle_cpus <= (busiest->idle_cpus + 1)) {
>>> > /*
>>> > * If the busiest group is not overloaded
>>> > * and there is no imbalance between this and busiest
>>> > @@ -10383,12 +10455,14 @@ static struct sched_group
>>> *find_busiest_group(struct lb_env *env)
>>> > * there is more than 1 CPU per group.
>>> > */
>>> > goto out_balanced;
>>> > + }
>>> >
>>> > - if (busiest->sum_h_nr_running == 1)
>>> > + if (busiest->sum_h_nr_running == 1) {
>>> > /*
>>> > * busiest doesn't have any tasks waiting to run
>>> > */
>>> > goto out_balanced;
>>> > + }
>>> > }
>>> >
>>> > force_balance:
On 2023-07-18 16:52, Shrikanth Hegde wrote:
> On 7/18/23 11:37 AM, Tobias Huschle wrote:
>> On 2023-07-15 01:05, Tim Chen wrote:
>>> On Fri, 2023-07-14 at 18:36 +0530, Shrikanth Hegde wrote:
>>>
>>>>
>>>>
>>>> If we consider symmetric platforms which have SMT4 such as power10.
>>>> we have a topology like below. multiple such MC will form DIE(PKG)
>>>>
>>>>
>>>> [0 2 4 6][1 3 5 7][8 10 12 14][9 11 13 15]
>>>> [--SMT--][--SMT--][----SMT---][---SMT----]
>>>> [--sg1--][--sg1--][---sg1----][---sg1----]
>>>> [--------------MC------------------------]
>>>>
>>>> In case of SMT4, if there is any group which has 2 or more tasks,
>>>> that
>>>> group will be marked as group_smt_balance. previously, if that group
>>>> had 2
>>>> or 3 tasks, it would have been marked as group_has_spare. Since all
>>>> the groups have
>>>> SMT that means behavior would be same fully busy right? That can
>>>> cause some
>>>> corner cases. No?
>>>
>>> You raised a good point. I was looking from SMT2
>>> perspective so group_smt_balance implies group_fully_busy.
>>> That is no longer true for SMT4.
>>>
>>> I am thinking of the following fix on the current patch
>>> to take care of SMT4. Do you think this addresses
>>> concerns from you and Tobias?
>>>
>>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>>> index 294a662c9410..3fc8d3a3bd22 100644
>>> --- a/kernel/sched/fair.c
>>> +++ b/kernel/sched/fair.c
>>> @@ -9588,6 +9588,17 @@ static bool update_sd_pick_busiest(struct
>>> lb_env *env,
>>> break;
>>>
>>> case group_smt_balance:
>>> + /* no idle cpus on both groups handled by
>>> group_fully_busy below */
>>> + if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) {
>>> + if (sgs->idle_cpus > busiest->idle_cpus)
>>> + return false;
>>> + if (sgs->idle_cpus < busiest->idle_cpus)
>>> + return true;
>>> + if (sgs->sum_nr_running <=
>>> busiest_sum_nr_running)
>>> + return false;
>>> + else
>>> + return true;
>>> + }
>>>
>>>
>>> I will be on vacation next three weeks so my response will be slow.
>>>
>>> Tim
>>>
>>
>> What if the setup is asymmetric, where SMT2 and SMT4 would mix, e.g.
>>
>> [0 1][2 3 4 5]
>> [SMT][--SMT--]
>>
>> If now CPUs 0,2,3 have a running task, both groups would be classified
>> as
>> smt_balance. But if it comes to the selection of the busiest group,
>> the
>> smaller
>> group would be selected, as it has less idle CPUs, right? Which could
>> lead
>> to the smaller group being left with no tasks.
>> Using the absolute numbers of task is what made the prefer_sibling
>> path
>> problematic,
>
>
> Yes. But Not sure how realistic is that configuration. on power10, we
> typically
> have all cores in either SMT1, SMT2 or SMT4. But not mixed configs.
> One can offline a CPUs to get into that cases in SMT4.
I'm also not sure if there is a real case for that. The assumption that two groups
are always of equal size was the reason the prefer_sibling path did not work as
expected. I just wanted to point out that we might be introducing a similar assumption
here again. It may be a valid assumption if there are no use cases for having
two cores with a different number of SMT threads.
>
>> I would assume that the same holds true here. Therefore, I would
>> prefer
>> avg_load,
>> or, similar to prefer_siblings, a ratio over the number of cores.
>>
>> I can't really test that on s390 as we always have SMT2. But, we can
>> have these
>> asymmetries on higher levels, e.g
>
>
> IIUC, on higher levels, group will not have SD_SHARE_CPUCAPACITY, so
> it shouldn't
> run into group_smt_balance.
>
>>
>> [0 1][2 3][4 5][6 7][8 9]
>> [SMT][SMT][SMT][SMT][SMT]
>> [-----core----][--core--]
>>
>> For large configurations this can be true for even higher levels.
>> Therefore, the idea was to move the smt_balance state around and adapt
>> its
>> conditions to something like this (which would require to reorder the
>> commits):
>>
>> @@ -8330,6 +8330,11 @@ enum fbq_type { regular, remote, all };
>> enum group_type {
>> /* The group has spare capacity that can be used to run more
>> tasks. */
>> group_has_spare = 0,
>> + /*
>> + * Balance SMT group that's fully busy. Can benefit from
>> migration
>> + * a task on SMT with busy sibling to another CPU on idle
>> core.
>> + */
>> + group_smt_balance,
>> /*
>> * The group is fully used and the tasks don't compete for
>> more CPU
>> * cycles. Nevertheless, some tasks might wait before running.
>> @@ -8340,11 +8345,6 @@ enum group_type {
>> * more powerful CPU.
>> */
>> group_misfit_task,
>> - /*
>> - * Balance SMT group that's fully busy. Can benefit from
>> migration
>> - * a task on SMT with busy sibling to another CPU on idle
>> core.
>> - */
>> - group_smt_balance,
>> /*
>> * SD_ASYM_PACKING only: One local CPU with higher capacity is
>> available,
>
>
> IIUC, for cluster topology of this patch, busiest group should be a
> SMT if it has 2
> threads compared to an Atom cluster having 4 threads. Atom cluster
> will be group_fully_busy,
> whereas SMT group will be group_smt_balance. For that to happen
> group_smt_balance should have
> higher group_type.
Makes sense.
>
>> * and the task should be migrated to it instead of running on
>> the
>> @@ -9327,15 +9327,15 @@ group_type group_classify(unsigned int
>> imbalance_pct,
>> if (sgs->group_asym_packing)
>> return group_asym_packing;
>>
>> - if (sgs->group_smt_balance)
>> - return group_smt_balance;
>> -
>> if (sgs->group_misfit_task_load)
>> return group_misfit_task;
>>
>> if (!group_has_capacity(imbalance_pct, sgs))
>> return group_fully_busy;
>>
>> + if (sgs->group_smt_balance)
>> + return group_smt_balance;
>> +
>> return group_has_spare;
>> }
>>
>> @@ -9457,8 +9457,7 @@ static inline bool smt_balance(struct lb_env
>> *env,
>> struct sg_lb_stats *sgs,
>> * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
>> * will not be on.
>> */
>> - if (group->flags & SD_SHARE_CPUCAPACITY &&
>> - sgs->sum_h_nr_running > 1)
>> + if (sgs->sum_h_nr_running > group->cores)
>
> In case of Power10, where we have SMT4, group->cores will be 1. I dont
> see
> a difference here.
The aim of this change was to also make use of it further up in the hierarchy,
where SD_SHARE_CPUCAPACITY is not set. Up there, it is possible to have
more than one core per group, and potentially different numbers (at least on s390).
It appears to work fine without these changes though, so I think there is
nothing to do for now.
>
>> return true;
>>
>> return false;
>>
>> The s390 problem is currently solved by changing the prefer_sibling
>> path. When disabling that flag, we might have an issue; will have to
>> verify that though.
>>
>>>>
>>>> One example: let's say sg1 has 4 tasks and sg2 has 0 tasks and is
>>>> trying to do load balance. Previously the imbalance would have been
>>>> 2; now the imbalance would be 1. But a subsequent load balance would
>>>> even it out.
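
(For the example above, the numbers work out roughly as follows; this is
an illustrative sketch and the previous-behaviour formula is simplified.)

	/*
	 * sg1 (busiest) runs 4 tasks, sg2 (local, pulling) runs 0.
	 *
	 * group_has_spare / prefer_sibling path (previous behaviour):
	 *	imbalance ~= (4 - 0) / 2 = 2   -> two tasks pulled in one pass
	 *
	 * group_smt_balance path (this patch):
	 *	imbalance  = 1                 -> one task pulled per pass;
	 *	                                  later passes even out the rest
	 */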
>>>>
>>>>
>>>>
>>>> > + return false;
>>>> > +}
>>>> > +
>>>> > static inline bool
>>>> > sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
>>>> > {
>>>> > @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct
>>>> lb_env *env,
>>>> > sgs->group_asym_packing = 1;
>>>> > }
>>>> >
>>>> > + /* Check for loaded SMT group to be balanced to dst CPU */
>>>> > + if (!local_group && smt_balance(env, sgs, group))
>>>> > + sgs->group_smt_balance = 1;
>>>> > +
>>>> > sgs->group_type = group_classify(env->sd->imbalance_pct,
>>>> group, sgs);
>>>> >
>>>> > /* Computing avg_load makes sense only when group is
>>>> overloaded */
>>>> > @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct
>>>> lb_env *env,
>>>> > return false;
>>>> > break;
>>>> >
>>>> > + case group_smt_balance:
>>>> > case group_fully_busy:
>>>> > /*
>>>> > * Select the fully busy group with highest avg_load. In
>>>> > @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct
>>>> lb_env *env,
>>>> > break;
>>>> >
>>>> > case group_has_spare:
>>>> > + /*
>>>> > + * Do not pick sg with SMT CPUs over sg with pure CPUs,
>>>> > + * as we do not want to pull task off SMT core with one task
>>>> > + * and make the core idle.
>>>> > + */
>>>> > + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
>>>> > + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
>>>> > + return false;
>>>> > + else
>>>> > + return true;
>>>> > + }
>>>> > +
>>>> > /*
>>>> > * Select not overloaded group with lowest number of idle
>>>> cpus
>>>> > * and highest number of running tasks. We could also compare
>>>> > @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct
>>>> sched_group *idlest,
>>>> >
>>>> > case group_imbalanced:
>>>> > case group_asym_packing:
>>>> > + case group_smt_balance:
>>>> > /* Those types are not used in the slow wakeup path */
>>>> > return false;
>>>> >
>>>> > @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd,
>>>> struct task_struct *p, int this_cpu)
>>>> >
>>>> > case group_imbalanced:
>>>> > case group_asym_packing:
>>>> > + case group_smt_balance:
>>>> > /* Those type are not used in the slow wakeup path */
>>>> > return NULL;
>>>> >
>>>> > @@ -10118,6 +10176,13 @@ static inline void
>>>> calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
>>>> > return;
>>>> > }
>>>> >
>>>> > + if (busiest->group_type == group_smt_balance) {
>>>> > + /* Reduce number of tasks sharing CPU capacity */
>>>> > + env->migration_type = migrate_task;
>>>> > + env->imbalance = 1;
>>>> > + return;
>>>> > + }
>>>> > +
>>>> > if (busiest->group_type == group_imbalanced) {
>>>> > /*
>>>> > * In the group_imb case we cannot rely on group-wide
>>>> averages
>>>> > @@ -10363,16 +10428,23 @@ static struct sched_group
>>>> *find_busiest_group(struct lb_env *env)
>>>> > goto force_balance;
>>>> >
>>>> > if (busiest->group_type != group_overloaded) {
>>>> > - if (env->idle == CPU_NOT_IDLE)
>>>> > + if (env->idle == CPU_NOT_IDLE) {
>>>> > /*
>>>> > * If the busiest group is not overloaded (and as a
>>>> > * result the local one too) but this CPU is already
>>>> > * busy, let another idle CPU try to pull task.
>>>> > */
>>>> > goto out_balanced;
>>>> > + }
>>>> > +
>>>> > + if (busiest->group_type == group_smt_balance &&
>>>> > + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
>>>> > + /* Let non SMT CPU pull from SMT CPU sharing with
>>>> sibling */
>>>> > + goto force_balance;
>>>> > + }
>>>> >
>>>> > if (busiest->group_weight > 1 &&
>>>> > - local->idle_cpus <= (busiest->idle_cpus + 1))
>>>> > + local->idle_cpus <= (busiest->idle_cpus + 1)) {
>>>> > /*
>>>> > * If the busiest group is not overloaded
>>>> > * and there is no imbalance between this and busiest
>>>> > @@ -10383,12 +10455,14 @@ static struct sched_group
>>>> *find_busiest_group(struct lb_env *env)
>>>> > * there is more than 1 CPU per group.
>>>> > */
>>>> > goto out_balanced;
>>>> > + }
>>>> >
>>>> > - if (busiest->sum_h_nr_running == 1)
>>>> > + if (busiest->sum_h_nr_running == 1) {
>>>> > /*
>>>> > * busiest doesn't have any tasks waiting to run
>>>> > */
>>>> > goto out_balanced;
>>>> > + }
>>>> > }
>>>> >
>>>> > force_balance:
On 2023-07-08 00:57, Tim Chen wrote:
> From: Tim C Chen <tim.c.chen@linux.intel.com>
>
> On hybrid CPUs with scheduling cluster enabled, we will need to
> consider balancing between SMT CPU cluster, and Atom core cluster.
>
> Below shows such a hybrid x86 CPU with 4 big cores and 8 atom cores.
> Each scheduling cluster span a L2 cache.
>
> --L2-- --L2-- --L2-- --L2-- ----L2---- -----L2------
> [0, 1] [2, 3] [4, 5] [5, 6] [7 8 9 10] [11 12 13 14]
> Big Big Big Big Atom Atom
> core core core core Module Module
>
> If the busiest group is a big core with both SMT CPUs busy, we should
> active load balance if destination group has idle CPU cores. Such
> condition is considered by asym_active_balance() in load balancing but
> not
> considered when looking for busiest group and computing load imbalance.
> Add this consideration in find_busiest_group() and
> calculate_imbalance().
>
> In addition, update the logic determining the busier group when one
> group
> is SMT and the other group is non SMT but both groups are partially
> busy
> with idle CPU. The busier group should be the group with idle cores
> rather
> than the group with one busy SMT CPU. We do not want to make the SMT
> group
> the busiest one to pull the only task off SMT CPU and causing the whole
> core to
> go empty.
>
> Otherwise suppose in the search for the busiest group, we first
> encounter
> an SMT group with 1 task and set it as the busiest. The destination
> group is an atom cluster with 1 task and we next encounter an atom
> cluster group with 3 tasks, we will not pick this atom cluster over the
> SMT group, even though we should. As a result, we do not load balance
> the busier Atom cluster (with 3 tasks) towards the local atom cluster
> (with 1 task). And it doesn't make sense to pick the 1 task SMT group
> as the busier group as we also should not pull task off the SMT towards
> the 1 task atom cluster and make the SMT core completely empty.
>
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
> kernel/sched/fair.c | 80 +++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 77 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 87317634fab2..f636d6c09dc6 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8279,6 +8279,11 @@ enum group_type {
> * more powerful CPU.
> */
> group_misfit_task,
> + /*
> + * Balance SMT group that's fully busy. Can benefit from migration
> + * a task on SMT with busy sibling to another CPU on idle core.
> + */
> + group_smt_balance,
Would it make sense to move smt_balance, s.t. we get:

group_has_spare < group_smt_balance < group_fully_busy

Conceptually it would be more intuitive to me like this, as the
smt_balance groups are busier than has_spare ones, but less busy
than fully_busy ones.

From a functional perspective I could also see some impact when
update_sd_pick_busiest() compares the group types. In that case we
would remove tasks from fully busy groups before moving them
from smt_balance groups. Not sure which way would be preferable
to increase overall throughput.

Since smt_balance is only selected if the group has SMT, this
should still not pull the last task off of a non-SMT CPU.
> /*
> * SD_ASYM_PACKING only: One local CPU with higher capacity is
> available,
> * and the task should be migrated to it instead of running on the
> @@ -8987,6 +8992,7 @@ struct sg_lb_stats {
> unsigned int group_weight;
> enum group_type group_type;
> unsigned int group_asym_packing; /* Tasks should be moved to
> preferred CPU */
> + unsigned int group_smt_balance; /* Task on busy SMT be moved */
> unsigned long group_misfit_task_load; /* A CPU has a task too big
> for its capacity */
> #ifdef CONFIG_NUMA_BALANCING
> unsigned int nr_numa_running;
> @@ -9260,6 +9266,9 @@ group_type group_classify(unsigned int
> imbalance_pct,
> if (sgs->group_asym_packing)
> return group_asym_packing;
>
> + if (sgs->group_smt_balance)
> + return group_smt_balance;
> +
> if (sgs->group_misfit_task_load)
> return group_misfit_task;
>
> @@ -9333,6 +9342,36 @@ sched_asym(struct lb_env *env, struct
> sd_lb_stats *sds, struct sg_lb_stats *sgs
> return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
> }
>
> +/* One group has more than one SMT CPU while the other group does not
> */
> +static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
> + struct sched_group *sg2)
> +{
> + if (!sg1 || !sg2)
> + return false;
> +
> + return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
> + (sg2->flags & SD_SHARE_CPUCAPACITY);
> +}
> +
> +static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats
> *sgs,
> + struct sched_group *group)
> +{
> + if (env->idle == CPU_NOT_IDLE)
> + return false;
> +
> + /*
> + * For SMT source group, it is better to move a task
> + * to a CPU that doesn't have multiple tasks sharing its CPU
> capacity.
> + * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
> + * will not be on.
> + */
> + if (group->flags & SD_SHARE_CPUCAPACITY &&
> + sgs->sum_h_nr_running > 1)
> + return true;
> +
> + return false;
> +}
> +
> static inline bool
> sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
> {
> @@ -9425,6 +9464,10 @@ static inline void update_sg_lb_stats(struct
> lb_env *env,
> sgs->group_asym_packing = 1;
> }
>
> + /* Check for loaded SMT group to be balanced to dst CPU */
> + if (!local_group && smt_balance(env, sgs, group))
> + sgs->group_smt_balance = 1;
> +
> sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
>
> /* Computing avg_load makes sense only when group is overloaded */
> @@ -9509,6 +9552,7 @@ static bool update_sd_pick_busiest(struct lb_env
> *env,
> return false;
> break;
>
> + case group_smt_balance:
> case group_fully_busy:
> /*
> * Select the fully busy group with highest avg_load. In
> @@ -9537,6 +9581,18 @@ static bool update_sd_pick_busiest(struct lb_env
> *env,
> break;
>
> case group_has_spare:
> + /*
> + * Do not pick sg with SMT CPUs over sg with pure CPUs,
> + * as we do not want to pull task off SMT core with one task
> + * and make the core idle.
> + */
> + if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
> + if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
> + return false;
> + else
> + return true;
> + }
> +
> /*
> * Select not overloaded group with lowest number of idle cpus
> * and highest number of running tasks. We could also compare
> @@ -9733,6 +9789,7 @@ static bool update_pick_idlest(struct sched_group
> *idlest,
>
> case group_imbalanced:
> case group_asym_packing:
> + case group_smt_balance:
> /* Those types are not used in the slow wakeup path */
> return false;
>
> @@ -9864,6 +9921,7 @@ find_idlest_group(struct sched_domain *sd,
> struct task_struct *p, int this_cpu)
>
> case group_imbalanced:
> case group_asym_packing:
> + case group_smt_balance:
> /* Those type are not used in the slow wakeup path */
> return NULL;
>
> @@ -10118,6 +10176,13 @@ static inline void calculate_imbalance(struct
> lb_env *env, struct sd_lb_stats *s
> return;
> }
>
> + if (busiest->group_type == group_smt_balance) {
> + /* Reduce number of tasks sharing CPU capacity */
> + env->migration_type = migrate_task;
> + env->imbalance = 1;
> + return;
> + }
> +
> if (busiest->group_type == group_imbalanced) {
> /*
> * In the group_imb case we cannot rely on group-wide averages
> @@ -10363,16 +10428,23 @@ static struct sched_group
> *find_busiest_group(struct lb_env *env)
> goto force_balance;
>
> if (busiest->group_type != group_overloaded) {
> - if (env->idle == CPU_NOT_IDLE)
> + if (env->idle == CPU_NOT_IDLE) {
> /*
> * If the busiest group is not overloaded (and as a
> * result the local one too) but this CPU is already
> * busy, let another idle CPU try to pull task.
> */
> goto out_balanced;
> + }
> +
> + if (busiest->group_type == group_smt_balance &&
> + smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
> + /* Let non SMT CPU pull from SMT CPU sharing with sibling */
> + goto force_balance;
> + }
>
> if (busiest->group_weight > 1 &&
> - local->idle_cpus <= (busiest->idle_cpus + 1))
> + local->idle_cpus <= (busiest->idle_cpus + 1)) {
> /*
> * If the busiest group is not overloaded
> * and there is no imbalance between this and busiest
> @@ -10383,12 +10455,14 @@ static struct sched_group
> *find_busiest_group(struct lb_env *env)
> * there is more than 1 CPU per group.
> */
> goto out_balanced;
> + }
>
> - if (busiest->sum_h_nr_running == 1)
> + if (busiest->sum_h_nr_running == 1) {
> /*
> * busiest doesn't have any tasks waiting to run
> */
> goto out_balanced;
> + }
> }
>
> force_balance:
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 87317634fab2..f636d6c09dc6 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -8279,6 +8279,11 @@ enum group_type {
> > * more powerful CPU.
> > */
> > group_misfit_task,
> > + /*
> > + * Balance SMT group that's fully busy. Can benefit from migration
> > + * a task on SMT with busy sibling to another CPU on idle core.
> > + */
> > + group_smt_balance,
>
> Would it make sense to move smt_balance, s.t. we get:
>
> group_has_spare < group_smt_balance < group_fully_busy
>
> Conceptually it would be more intuitive to me like this, as the
> smt_balance groups are busier than has_spare ones, but less busy
> than fully_busy ones.
>
> From a functional perspective I could also see some impact when
> update_sd_pick_busiest() compares the group types. In that case we
> would remove tasks from fully busy groups before moving them
> from smt_balance groups. Not sure which way would be preferable
> to increase overall throughput.
>
> Since smt_balance is only selected if the group has SMT, this
> should still not pull the last task off of a non-SMT CPU.
>
>
I think you have similar concerns to Shrikanth's on this patch.
Can you see if my fix to update_sd_pick_busiest() in my reply
to Shrikanth addresses what you have in mind?
Tim