[PATCH v9 RESEND 2/3] sched/fair: Remove task_group->se pointer array

Zecheng Li posted 3 patches 2 weeks ago
[PATCH v9 RESEND 2/3] sched/fair: Remove task_group->se pointer array
Posted by Zecheng Li 2 weeks ago
From: Zecheng Li <zecheng@google.com>

Now that struct sched_entity is co-located with struct cfs_rq for
non-root task groups, the task_group->se pointer array is redundant. The
associated sched_entity can be loaded directly from the cfs_rq.

This patch performs the access conversion with the helpers:

- is_root_task_group(tg): checks if a task group is the root task group.
It compares the task group's address with the global root_task_group
variable.

- tg_se(tg, cpu): retrieves the cfs_rq and returns the address of the
co-located se. This function checks if tg is the root task group to
ensure it behaves the same as the previous tg->se[cpu]. It replaces all
accesses that use the tg->se[cpu] pointer array with calls to the new
tg_se(tg, cpu) accessor.

- cfs_rq_se(cfs_rq): simplifies access paths like cfs_rq->tg->se[...] to
use the co-located sched_entity. This function also checks if tg is the
root task group to ensure the same behavior.

Since tg_se is not in very hot code paths, and the branch is a register
comparison with an immediate value (`&root_task_group`), the performance
impact is expected to be negligible.

Signed-off-by: Zecheng Li <zecheng@google.com>
Signed-off-by: Zecheng Li <zli94@ncsu.edu>
Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
 kernel/sched/core.c  |  7 ++-----
 kernel/sched/debug.c |  2 +-
 kernel/sched/fair.c  | 25 +++++++++----------------
 kernel/sched/sched.h | 31 ++++++++++++++++++++++++++-----
 4 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 64b467c1d5b6..2df42786f780 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8609,7 +8609,7 @@ void __init sched_init(void)
 	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	ptr += 2 * nr_cpu_ids * sizeof(void **);
+	ptr += nr_cpu_ids * sizeof(void **);
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
 	ptr += 2 * nr_cpu_ids * sizeof(void **);
@@ -8618,9 +8618,6 @@ void __init sched_init(void)
 		ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		root_task_group.se = (struct sched_entity **)ptr;
-		ptr += nr_cpu_ids * sizeof(void **);
-
 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
@@ -9697,7 +9694,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
 		int i;
 
 		for_each_possible_cpu(i) {
-			stats = __schedstats_from_se(tg->se[i]);
+			stats = __schedstats_from_se(tg_se(tg, i));
 			ws += schedstat_val(stats->wait_sum);
 		}
 
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 6246008c431e..7e7860d56b4b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -750,7 +750,7 @@ void dirty_sched_domain_sysctl(int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
 {
-	struct sched_entity *se = tg->se[cpu];
+	struct sched_entity *se = tg_se(tg, cpu);
 
 #define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
 #define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 79160d419c9f..c116e7c2b916 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6212,7 +6212,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
-	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
+	struct sched_entity *se = cfs_rq_se(cfs_rq);
 
 	/*
 	 * It's possible we are called with runtime_remaining < 0 due to things
@@ -10067,7 +10067,6 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 {
 	struct cfs_rq *cfs_rq, *pos;
 	bool decayed = false;
-	int cpu = cpu_of(rq);
 
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
@@ -10087,7 +10086,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		}
 
 		/* Propagate pending load changes to the parent, if any: */
-		se = cfs_rq->tg->se[cpu];
+		se = cfs_rq_se(cfs_rq);
 		if (se && !skip_blocked_update(se))
 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
 
@@ -10113,8 +10112,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
  */
 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 {
-	struct rq *rq = rq_of(cfs_rq);
-	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
+	struct sched_entity *se = cfs_rq_se(cfs_rq);
 	unsigned long now = jiffies;
 	unsigned long load;
 
@@ -13880,7 +13878,6 @@ void free_fair_sched_group(struct task_group *tg)
 	}
 
 	kfree(tg->cfs_rq);
-	kfree(tg->se);
 }
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
@@ -13893,9 +13890,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids);
 	if (!tg->cfs_rq)
 		goto err;
-	tg->se = kzalloc_objs(se, nr_cpu_ids);
-	if (!tg->se)
-		goto err;
 
 	tg->shares = NICE_0_LOAD;
 
@@ -13910,7 +13904,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		cfs_rq = &state->cfs_rq;
 		se = &state->se;
 		init_cfs_rq(cfs_rq);
-		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, tg_se(parent, i));
 		init_entity_runnable_average(se);
 	}
 
@@ -13929,7 +13923,7 @@ void online_fair_sched_group(struct task_group *tg)
 
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
-		se = tg->se[i];
+		se = tg_se(tg, i);
 		rq_lock_irq(rq, &rf);
 		update_rq_clock(rq);
 		attach_entity_cfs_rq(se);
@@ -13946,7 +13940,7 @@ void unregister_fair_sched_group(struct task_group *tg)
 
 	for_each_possible_cpu(cpu) {
 		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
-		struct sched_entity *se = tg->se[cpu];
+		struct sched_entity *se = tg_se(tg, cpu);
 		struct rq *rq = cpu_rq(cpu);
 
 		if (se) {
@@ -13983,7 +13977,6 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	init_cfs_rq_runtime(cfs_rq);
 
 	tg->cfs_rq[cpu] = cfs_rq;
-	tg->se[cpu] = se;
 
 	/* se could be NULL for root_task_group */
 	if (!se)
@@ -14014,7 +14007,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	/*
 	 * We can't change the weight of the root cgroup.
 	 */
-	if (!tg->se[0])
+	if (is_root_task_group(tg))
 		return -EINVAL;
 
 	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
@@ -14025,7 +14018,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
-		struct sched_entity *se = tg->se[i];
+		struct sched_entity *se = tg_se(tg, i);
 		struct rq_flags rf;
 
 		/* Propagate contribution to hierarchy */
@@ -14076,7 +14069,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
-		struct sched_entity *se = tg->se[i];
+		struct sched_entity *se = tg_se(tg, i);
 		struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
 		bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
 		long idle_task_delta;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 826f21fa3f36..258dc689a535 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -480,8 +480,6 @@ struct task_group {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	/* schedulable entities of this group on each CPU */
-	struct sched_entity	**se;
 	/* runqueue "owned" by this group on each CPU */
 	struct cfs_rq		**cfs_rq;
 	unsigned long		shares;
@@ -920,7 +918,8 @@ struct dl_rq {
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-
+/* Check whether a task group is root tg */
+#define is_root_task_group(tg) ((tg) == &root_task_group)
 /* An entity is a task if it doesn't "own" a runqueue */
 #define entity_is_task(se)	(!se->my_q)
 
@@ -2259,6 +2258,28 @@ struct cfs_tg_state {
 	struct sched_entity	se;
 	struct sched_statistics	stats;
 } __no_randomize_layout;
+
+static inline struct sched_entity *tg_se(struct task_group *tg, int cpu)
+{
+	struct cfs_tg_state *state;
+
+	if (is_root_task_group(tg))
+		return NULL;
+
+	state = container_of(tg->cfs_rq[cpu], struct cfs_tg_state, cfs_rq);
+	return &state->se;
+}
+
+static inline struct sched_entity *cfs_rq_se(struct cfs_rq *cfs_rq)
+{
+	struct cfs_tg_state *state;
+
+	if (is_root_task_group(cfs_rq->tg))
+		return NULL;
+
+	state = container_of(cfs_rq, struct cfs_tg_state, cfs_rq);
+	return &state->se;
+}
 #endif
 
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -2271,8 +2292,8 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
 	p->se.cfs_rq = tg->cfs_rq[cpu];
-	p->se.parent = tg->se[cpu];
-	p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
+	p->se.parent = tg_se(tg, cpu);
+	p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-- 
2.53.0
Re: [PATCH v9 RESEND 2/3] sched/fair: Remove task_group->se pointer array
Posted by Josh Don 2 days, 11 hours ago
Thanks Zecheng,

On Thu, Mar 19, 2026 at 10:52 AM Zecheng Li <zli94@ncsu.edu> wrote:
>
> From: Zecheng Li <zecheng@google.com>
>
> Now that struct sched_entity is co-located with struct cfs_rq for
> non-root task groups, the task_group->se pointer array is redundant. The
> associated sched_entity can be loaded directly from the cfs_rq.
>
> This patch performs the access conversion with the helpers:
>
> - is_root_task_group(tg): checks if a task group is the root task group.
> It compares the task group's address with the global root_task_group
> variable.
>
> - tg_se(tg, cpu): retrieves the cfs_rq and returns the address of the
> co-located se. This function checks if tg is the root task group to
> ensure behaving the same of previous tg->se[cpu]. Replaces all accesses
> that use the tg->se[cpu] pointer array with calls to the new tg_se(tg,
> cpu) accessor.
>
> - cfs_rq_se(cfs_rq): simplifies access paths like cfs_rq->tg->se[...] to
> use the co-located sched_entity. This function also checks if tg is the
> root task group to ensure same behavior.
>
> Since tg_se is not in very hot code paths, and the branch is a register
> comparison with an immediate value (`&root_task_group`), the performance
> impact is expected to be negligible.
>
> Signed-off-by: Zecheng Li <zecheng@google.com>
> Signed-off-by: Zecheng Li <zli94@ncsu.edu>
> Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
> ---
>  kernel/sched/core.c  |  7 ++-----
>  kernel/sched/debug.c |  2 +-
>  kernel/sched/fair.c  | 25 +++++++++----------------
>  kernel/sched/sched.h | 31 ++++++++++++++++++++++++++-----
>  4 files changed, 38 insertions(+), 27 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 64b467c1d5b6..2df42786f780 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -8609,7 +8609,7 @@ void __init sched_init(void)
>         wait_bit_init();
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -       ptr += 2 * nr_cpu_ids * sizeof(void **);
> +       ptr += nr_cpu_ids * sizeof(void **);
>  #endif
>  #ifdef CONFIG_RT_GROUP_SCHED
>         ptr += 2 * nr_cpu_ids * sizeof(void **);
> @@ -8618,9 +8618,6 @@ void __init sched_init(void)
>                 ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -               root_task_group.se = (struct sched_entity **)ptr;
> -               ptr += nr_cpu_ids * sizeof(void **);
> -
>                 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
>                 ptr += nr_cpu_ids * sizeof(void **);
>
> @@ -9697,7 +9694,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
>                 int i;
>
>                 for_each_possible_cpu(i) {
> -                       stats = __schedstats_from_se(tg->se[i]);
> +                       stats = __schedstats_from_se(tg_se(tg, i));
>                         ws += schedstat_val(stats->wait_sum);
>                 }
>
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 6246008c431e..7e7860d56b4b 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -750,7 +750,7 @@ void dirty_sched_domain_sysctl(int cpu)
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
>  {
> -       struct sched_entity *se = tg->se[cpu];
> +       struct sched_entity *se = tg_se(tg, cpu);
>
>  #define P(F)           SEQ_printf(m, "  .%-30s: %lld\n",       #F, (long long)F)
>  #define P_SCHEDSTAT(F) SEQ_printf(m, "  .%-30s: %lld\n",       \
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 79160d419c9f..c116e7c2b916 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -6212,7 +6212,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
>  {
>         struct rq *rq = rq_of(cfs_rq);
>         struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
> -       struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
> +       struct sched_entity *se = cfs_rq_se(cfs_rq);
>
>         /*
>          * It's possible we are called with runtime_remaining < 0 due to things
> @@ -10067,7 +10067,6 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
>  {
>         struct cfs_rq *cfs_rq, *pos;
>         bool decayed = false;
> -       int cpu = cpu_of(rq);
>
>         /*
>          * Iterates the task_group tree in a bottom up fashion, see
> @@ -10087,7 +10086,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
>                 }
>
>                 /* Propagate pending load changes to the parent, if any: */
> -               se = cfs_rq->tg->se[cpu];
> +               se = cfs_rq_se(cfs_rq);
>                 if (se && !skip_blocked_update(se))
>                         update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
>
> @@ -10113,8 +10112,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
>   */
>  static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
>  {
> -       struct rq *rq = rq_of(cfs_rq);
> -       struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
> +       struct sched_entity *se = cfs_rq_se(cfs_rq);
>         unsigned long now = jiffies;
>         unsigned long load;
>
> @@ -13880,7 +13878,6 @@ void free_fair_sched_group(struct task_group *tg)
>         }
>
>         kfree(tg->cfs_rq);
> -       kfree(tg->se);
>  }
>
>  int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
> @@ -13893,9 +13890,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
>         tg->cfs_rq = kzalloc_objs(cfs_rq, nr_cpu_ids);
>         if (!tg->cfs_rq)
>                 goto err;
> -       tg->se = kzalloc_objs(se, nr_cpu_ids);
> -       if (!tg->se)
> -               goto err;
>
>         tg->shares = NICE_0_LOAD;
>
> @@ -13910,7 +13904,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
>                 cfs_rq = &state->cfs_rq;
>                 se = &state->se;
>                 init_cfs_rq(cfs_rq);
> -               init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
> +               init_tg_cfs_entry(tg, cfs_rq, se, i, tg_se(parent, i));
>                 init_entity_runnable_average(se);
>         }
>
> @@ -13929,7 +13923,7 @@ void online_fair_sched_group(struct task_group *tg)
>
>         for_each_possible_cpu(i) {
>                 rq = cpu_rq(i);
> -               se = tg->se[i];
> +               se = tg_se(tg, i);
>                 rq_lock_irq(rq, &rf);
>                 update_rq_clock(rq);
>                 attach_entity_cfs_rq(se);
> @@ -13946,7 +13940,7 @@ void unregister_fair_sched_group(struct task_group *tg)
>
>         for_each_possible_cpu(cpu) {
>                 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
> -               struct sched_entity *se = tg->se[cpu];
> +               struct sched_entity *se = tg_se(tg, cpu);
>                 struct rq *rq = cpu_rq(cpu);
>
>                 if (se) {
> @@ -13983,7 +13977,6 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
>         init_cfs_rq_runtime(cfs_rq);
>
>         tg->cfs_rq[cpu] = cfs_rq;
> -       tg->se[cpu] = se;
>
>         /* se could be NULL for root_task_group */
>         if (!se)
> @@ -14014,7 +14007,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
>         /*
>          * We can't change the weight of the root cgroup.
>          */
> -       if (!tg->se[0])
> +       if (is_root_task_group(tg))
>                 return -EINVAL;
>
>         shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
> @@ -14025,7 +14018,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
>         tg->shares = shares;
>         for_each_possible_cpu(i) {
>                 struct rq *rq = cpu_rq(i);
> -               struct sched_entity *se = tg->se[i];
> +               struct sched_entity *se = tg_se(tg, i);
>                 struct rq_flags rf;
>
>                 /* Propagate contribution to hierarchy */
> @@ -14076,7 +14069,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
>
>         for_each_possible_cpu(i) {
>                 struct rq *rq = cpu_rq(i);
> -               struct sched_entity *se = tg->se[i];
> +               struct sched_entity *se = tg_se(tg, i);
>                 struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
>                 bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
>                 long idle_task_delta;
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 826f21fa3f36..258dc689a535 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -480,8 +480,6 @@ struct task_group {
>  #endif
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -       /* schedulable entities of this group on each CPU */
> -       struct sched_entity     **se;
>         /* runqueue "owned" by this group on each CPU */
>         struct cfs_rq           **cfs_rq;
>         unsigned long           shares;
> @@ -920,7 +918,8 @@ struct dl_rq {
>  };
>
>  #ifdef CONFIG_FAIR_GROUP_SCHED
> -
> +/* Check whether a task group is root tg */
> +#define is_root_task_group(tg) ((tg) == &root_task_group)
>  /* An entity is a task if it doesn't "own" a runqueue */
>  #define entity_is_task(se)     (!se->my_q)
>
> @@ -2259,6 +2258,28 @@ struct cfs_tg_state {
>         struct sched_entity     se;
>         struct sched_statistics stats;
>  } __no_randomize_layout;
> +
> +static inline struct sched_entity *tg_se(struct task_group *tg, int cpu)
> +{
> +       struct cfs_tg_state *state;
> +
> +       if (is_root_task_group(tg))
> +               return NULL;
> +
> +       state = container_of(tg->cfs_rq[cpu], struct cfs_tg_state, cfs_rq);
> +       return &state->se;
> +}
> +
> +static inline struct sched_entity *cfs_rq_se(struct cfs_rq *cfs_rq)
> +{
> +       struct cfs_tg_state *state;
> +
> +       if (is_root_task_group(cfs_rq->tg))
> +               return NULL;
> +
> +       state = container_of(cfs_rq, struct cfs_tg_state, cfs_rq);
> +       return &state->se;
> +}
>  #endif
>
>  /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
> @@ -2271,8 +2292,8 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>         set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
>         p->se.cfs_rq = tg->cfs_rq[cpu];
> -       p->se.parent = tg->se[cpu];
> -       p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
> +       p->se.parent = tg_se(tg, cpu);
> +       p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
>  #endif
>
>  #ifdef CONFIG_RT_GROUP_SCHED
> --
> 2.53.0
>

Reviewed-by: Josh Don <joshdon@google.com>