After commit 5f6bd380c7bd ("sched/rt: Remove default bandwidth control"),
the rt_rq bandwidth control members (rt_time, rt_throttled, rt_runtime and
rt_runtime_lock) are only initialized when CONFIG_RT_GROUP_SCHED is enabled.
Remove the now-unnecessary CONFIG_RT_GROUP_SCHED block from init_rt_rq()
and initialize these members in init_tg_rt_entry() instead.
In sched_init(), root_task_group's rt_bandwidth.rt_runtime is already set
from global_rt_runtime(), so init_tg_rt_entry() can uniformly initialize
rt_rq->rt_runtime from tg->rt_bandwidth.rt_runtime for both the root and
child task groups.
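For reference, a minimal sketch of where that value comes from and where it
ends up after this change (the sched_init() excerpt is quoted from memory
and abbreviated, so the exact context lines may differ):

	/* kernel/sched/core.c: sched_init(), before the per-CPU loop */
#ifdef CONFIG_RT_GROUP_SCHED
	init_rt_bandwidth(&root_task_group.rt_bandwidth,
			global_rt_period(), global_rt_runtime());
#endif

	/* kernel/sched/rt.c: init_tg_rt_entry() then copies it per CPU */
	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;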
Also remove the unnecessary CONFIG_RT_GROUP_SCHED guard from rt_se_prio().
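That guard only compiles out dead code: on !CONFIG_RT_GROUP_SCHED builds,
group_rt_rq() is a stub that returns NULL, so the branch can never be
taken. A minimal sketch of that existing stub (from memory):

	/* kernel/sched/rt.c, !CONFIG_RT_GROUP_SCHED variant */
	static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
	{
		return NULL;
	}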
Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
---
kernel/sched/core.c | 6 ------
kernel/sched/rt.c | 17 +++++------------
2 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index be00629f0ba4..e9d6ceead9f4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8772,12 +8772,6 @@ void __init sched_init(void)
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
- /*
- * This is required for init cpu because rt.c:__enable_runtime()
- * starts working after scheduler_running, which is not the case
- * yet.
- */
- rq->rt.rt_runtime = global_rt_runtime();
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
#endif
rq->sd = NULL;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7936d4333731..390f3d08abbe 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -84,14 +84,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
plist_head_init(&rt_rq->pushable_tasks);
/* We start is dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
-
-#ifdef CONFIG_RT_GROUP_SCHED
- rt_rq->rt_time = 0;
- rt_rq->rt_throttled = 0;
- rt_rq->rt_runtime = 0;
- raw_spin_lock_init(&rt_rq->rt_runtime_lock);
- rt_rq->tg = &root_task_group;
-#endif
}
#ifdef CONFIG_RT_GROUP_SCHED
@@ -229,10 +221,14 @@ void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
{
struct rq *rq = cpu_rq(cpu);
- rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
+ rt_rq->rt_time = 0;
+ rt_rq->rt_throttled = 0;
rt_rq->rt_nr_boosted = 0;
+ raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+
rt_rq->rq = rq;
rt_rq->tg = tg;
+ rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
@@ -280,7 +276,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
goto err_free_rq;
init_rt_rq(rt_rq);
- rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
@@ -957,12 +952,10 @@ static void __disable_runtime(struct rq *rq) { }
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
-#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
return rt_rq->highest_prio.curr;
-#endif
return rt_task_of(rt_se)->prio;
}
--
2.25.1
Hi all,

Gentle ping.

Thanks