From: luca abeni <luca.abeni@santannapisa.it>
Add dl_init_tg to initialize and/or update an rt-cgroup dl_server and to
also account for the allocated bandwidth. This function is currently unhooked
and will later be used to allocate bandwidth to rt-cgroups.
Add a lock guard for raw_spin_rq_lock_irq() for cleaner code.
Co-developed-by: Alessio Balsini <a.balsini@sssup.it>
Signed-off-by: Alessio Balsini <a.balsini@sssup.it>
Co-developed-by: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Co-developed-by: Yuri Andriaccio <yurand2000@gmail.com>
Signed-off-by: Yuri Andriaccio <yurand2000@gmail.com>
Signed-off-by: luca abeni <luca.abeni@santannapisa.it>
---
kernel/sched/deadline.c | 31 +++++++++++++++++++++++++++++++
kernel/sched/sched.h | 5 +++++
2 files changed, 36 insertions(+)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3046148c94..4f43883a65 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -340,6 +340,37 @@ void cancel_inactive_timer(struct sched_dl_entity *dl_se)
cancel_dl_timer(dl_se, &dl_se->inactive_timer);
}
+#ifdef CONFIG_RT_GROUP_SCHED
+void dl_init_tg(struct sched_dl_entity *dl_se, u64 rt_runtime, u64 rt_period)
+{
+ struct rq *rq = container_of(dl_se->dl_rq, struct rq, dl);
+ int is_active;
+ u64 new_bw;
+
+ guard(raw_spin_rq_lock_irq)(rq);
+ is_active = dl_se->my_q->rt.rt_nr_running > 0;
+
+ update_rq_clock(rq);
+ dl_server_stop(dl_se);
+
+ new_bw = to_ratio(rt_period, rt_runtime);
+ dl_rq_change_utilization(rq, dl_se, new_bw);
+
+ dl_se->dl_runtime = rt_runtime;
+ dl_se->dl_deadline = rt_period;
+ dl_se->dl_period = rt_period;
+
+ dl_se->runtime = 0;
+ dl_se->deadline = 0;
+
+ dl_se->dl_bw = new_bw;
+ dl_se->dl_density = new_bw;
+
+ if (is_active)
+ dl_server_start(dl_se);
+}
+#endif
+
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6f6e39a628..f42bef06a9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -419,6 +419,7 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq,
struct rq *served_rq,
dl_server_pick_f pick_task);
extern void sched_init_dl_servers(void);
+extern void dl_init_tg(struct sched_dl_entity *dl_se, u64 rt_runtime, u64 rt_period);
extern void dl_server_update_idle_time(struct rq *rq,
struct task_struct *p);
@@ -1922,6 +1923,10 @@ DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
rq_unlock_irqrestore(_T->lock, &_T->rf),
struct rq_flags rf)
+DEFINE_LOCK_GUARD_1(raw_spin_rq_lock_irq, struct rq,
+ raw_spin_rq_lock_irq(_T->lock),
+ raw_spin_rq_unlock_irq(_T->lock))
+
static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
__acquires(rq->lock)
{
--
2.51.0