[RFC PATCH v4 12/28] sched/rt: Implement dl-server operations for rt-cgroups.

- Implement rt_server_pick(), the callback that deadline servers use to
  pick a task to schedule: it selects the next runnable RT task from the
  group's local runqueue and tells the scheduler that it is going to run
  next (a condensed sketch follows this list).

- Let enqueue_task_rt/dequeue_task_rt start/stop the attached deadline
  server when the first task is enqueued on, or the last task is dequeued
  from, a group's local runqueue (see the enqueue/dequeue sketch below).

- Change update_curr_rt to perform a deadline server update when the
  updated task is served by a non-root group (see the update_curr_rt
  sketch below).

- Update inc/dec_dl_tasks to account for the number of tasks queued on an
  rt-cgroup server's local runqueue: since that runqueue is distinct from
  the global runqueue, the number of served tasks must be added to/removed
  from the global count whenever an rt-group server is activated/deactivated
  (see the inc_dl_tasks sketch below). The nr_running field is used to stay
  compatible with future dl-server interfaces.

- Update inc/dec_rt_prio_smp to change an rq's cpupri only if the rt_rq
  is the global runqueue, since cgroups are scheduled according to their
  dl-server's priority.

- Update inc/dec_rt_tasks to account for waking/sleeping tasks on the
  global runqueue when the task belongs to the root cgroup or when its
  group's local dl server is active. The accounting is skipped while a
  server is throttled, since the server itself adds/subtracts its number
  of running tasks when it gets enqueued/dequeued. For rt-cgroups, also
  account for the number of active tasks in the nr_running field of the
  local runqueue (via add/sub_nr_running), as this count is used when the
  dl server is enqueued/dequeued.

- Update set_task_rq to record the dl_rq, tracking which deadline
  server manages a task.

- Update set_task_rq to stop setting the parent field, as it is no longer
  used by this patchset's code, and remove the now-unused parent field
  from sched_rt_entity.
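
For reference, the pick callback ends up roughly as follows (a condensed
sketch of the kernel/sched/rt.c hunk below):

  static struct task_struct *rt_server_pick(struct sched_dl_entity *dl_se)
  {
          struct rt_rq *rt_rq = &dl_se->my_q->rt;
          struct rq *rq = rq_of_rt_rq(rt_rq);
          struct task_struct *p;

          /* Nothing queued on the group's local runqueue: nothing to serve. */
          if (rt_rq->rt_nr_running == 0)
                  return NULL;

          /* Pick the next runnable RT task and mark it as the one to run. */
          p = rt_task_of(pick_next_rt_entity(rt_rq));
          set_next_task_rt(rq, p, true);

          return p;
  }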
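
The server start/stop logic in the enqueue/dequeue paths boils down to the
following (condensed from the hunks below, surrounding bookkeeping omitted):

  /* enqueue_task_rt(), before enqueue_rt_entity(): a task arriving in an
   * idle group starts the group's deadline server. */
  if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) &&
      is_dl_group(rt_rq) && rt_rq->rt_nr_running == 0)
          dl_server_start(dl_group_of(rt_rq));

  /* dequeue_task_rt(): once the group's local runqueue has become empty,
   * the last task leaving the group stops the server. */
  if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) &&
      is_dl_group(rt_rq) && rt_rq->rt_nr_running == 0)
          dl_server_stop(dl_group_of(rt_rq));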
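
Runtime accounting in update_curr_rt charges the executed time to the
serving group's dl server (condensed from the hunk below):

  rt_rq = rt_rq_of_se(&donor->rt);
  if (is_dl_group(rt_rq)) {
          /* Charge delta_exec to the dl server that serves this group. */
          dl_server_update(dl_group_of(rt_rq), delta_exec);
  }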
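
Global-runqueue accounting when a server is enqueued follows this pattern
in inc_dl_tasks(), with dec_dl_tasks() symmetric (condensed from the
kernel/sched/deadline.c hunks below):

  if (!dl_server(dl_se)) {
          /* Plain deadline task: one more runnable task. */
          add_nr_running(rq_of_dl_rq(dl_rq), 1);
  } else if (rq_of_dl_se(dl_se) != dl_se->my_q) {
          /* rt-group server: its local runqueue is separate from the
           * global one, so account for all of its queued tasks at once. */
          WARN_ON(dl_se->my_q->rt.rt_nr_running != dl_se->my_q->nr_running);
          add_nr_running(rq_of_dl_rq(dl_rq), dl_se->my_q->nr_running);
  }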

Co-developed-by: Alessio Balsini <a.balsini@sssup.it>
Signed-off-by: Alessio Balsini <a.balsini@sssup.it>
Co-developed-by: Andrea Parri <parri.andrea@gmail.com>
Signed-off-by: Andrea Parri <parri.andrea@gmail.com>
Co-developed-by: luca abeni <luca.abeni@santannapisa.it>
Signed-off-by: luca abeni <luca.abeni@santannapisa.it>
Signed-off-by: Yuri Andriaccio <yurand2000@gmail.com>
---
 include/linux/sched.h   |  1 -
 kernel/sched/deadline.c |  8 +++++
 kernel/sched/rt.c       | 68 ++++++++++++++++++++++++++++++++++++++---
 kernel/sched/sched.h    |  8 ++++-
 4 files changed, 79 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 000aa3b2b1..3f1f15b6d2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -629,7 +629,6 @@ struct sched_rt_entity {

 	struct sched_rt_entity		*back;
 #ifdef CONFIG_RT_GROUP_SCHED
-	struct sched_rt_entity		*parent;
 	/* rq on which this entity is (to be) queued: */
 	struct rt_rq			*rt_rq;
 	/* rq "owned" by this entity/group: */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 089fd2c9b7..b890fdd4b2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1847,6 +1847,10 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)

 	if (!dl_server(dl_se))
 		add_nr_running(rq_of_dl_rq(dl_rq), 1);
+	else if (rq_of_dl_se(dl_se) != dl_se->my_q) {
+		WARN_ON(dl_se->my_q->rt.rt_nr_running != dl_se->my_q->nr_running);
+		add_nr_running(rq_of_dl_rq(dl_rq), dl_se->my_q->nr_running);
+	}

 	inc_dl_deadline(dl_rq, deadline);
 }
@@ -1859,6 +1863,10 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)

 	if (!dl_server(dl_se))
 		sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+	else if (rq_of_dl_se(dl_se) != dl_se->my_q) {
+		WARN_ON(dl_se->my_q->rt.rt_nr_running != dl_se->my_q->nr_running);
+		sub_nr_running(rq_of_dl_rq(dl_rq), dl_se->my_q->nr_running);
+	}

 	dec_dl_deadline(dl_rq, dl_se->deadline);
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2301efc03f..7ec117a18d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -128,9 +128,22 @@ void free_rt_sched_group(struct task_group *tg)
 	kfree(tg->dl_se);
 }

+static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq);
+static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first);
+
 static struct task_struct *rt_server_pick(struct sched_dl_entity *dl_se)
 {
-	return NULL;
+	struct rt_rq *rt_rq = &dl_se->my_q->rt;
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+	struct task_struct *p;
+
+	if (dl_se->my_q->rt.rt_nr_running == 0)
+		return NULL;
+
+	p = rt_task_of(pick_next_rt_entity(rt_rq));
+	set_next_task_rt(rq, p, true);
+
+	return p;
 }

 static inline void __rt_rq_free(struct rt_rq **rt_rq)
@@ -435,6 +448,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *donor = rq->donor;
+	struct rt_rq *rt_rq;
 	s64 delta_exec;

 	if (donor->sched_class != &rt_sched_class)
@@ -444,8 +458,18 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely(delta_exec <= 0))
 		return;

-	if (!rt_bandwidth_enabled())
+	if (!rt_group_sched_enabled())
 		return;
+
+	if (!dl_bandwidth_enabled())
+		return;
+
+	rt_rq = rt_rq_of_se(&donor->rt);
+	if (is_dl_group(rt_rq)) {
+		struct sched_dl_entity *dl_se = dl_group_of(rt_rq);
+
+		dl_server_update(dl_se, delta_exec);
+	}
 }

 static void
@@ -456,7 +480,7 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 	/*
 	 * Change rq's cpupri only if rt_rq is the top queue.
 	 */
-	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && is_dl_group(rt_rq))
 		return;

 	if (rq->online && prio < prev_prio)
@@ -471,7 +495,7 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 	/*
 	 * Change rq's cpupri only if rt_rq is the top queue.
 	 */
-	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && is_dl_group(rt_rq))
 		return;

 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
@@ -534,6 +558,16 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rr_nr_running += is_rr_task(rt_se);

 	inc_rt_prio(rt_rq, rt_se_prio(rt_se));
+
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && is_dl_group(rt_rq)) {
+		struct sched_dl_entity *dl_se = dl_group_of(rt_rq);
+
+		if (!dl_se->dl_throttled)
+			add_nr_running(rq_of_rt_rq(rt_rq), 1);
+		add_nr_running(served_rq_of_rt_rq(rt_rq), 1);
+	} else {
+		add_nr_running(rq_of_rt_rq(rt_rq), 1);
+	}
 }

 static inline
@@ -544,6 +578,16 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	rt_rq->rr_nr_running -= is_rr_task(rt_se);

 	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && is_dl_group(rt_rq)) {
+		struct sched_dl_entity *dl_se = dl_group_of(rt_rq);
+
+		if (!dl_se->dl_throttled)
+			sub_nr_running(rq_of_rt_rq(rt_rq), 1);
+		sub_nr_running(served_rq_of_rt_rq(rt_rq), 1);
+	} else {
+		sub_nr_running(rq_of_rt_rq(rt_rq), 1);
+	}
 }

 /*
@@ -725,6 +769,14 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	check_schedstat_required();
 	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);

+	/* Task arriving in an idle group of tasks. */
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) &&
+	    is_dl_group(rt_rq) && rt_rq->rt_nr_running == 0) {
+		struct sched_dl_entity *dl_se = dl_group_of(rt_rq);
+
+		dl_server_start(dl_se);
+	}
+
 	enqueue_rt_entity(rt_se, flags);

 	if (task_is_blocked(p))
@@ -744,6 +796,14 @@ static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)

 	dequeue_pushable_task(rt_rq, p);

+	/* Last task of the task group. */
+	if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) &&
+	    is_dl_group(rt_rq) && rt_rq->rt_nr_running == 0) {
+		struct sched_dl_entity *dl_se = dl_group_of(rt_rq);
+
+		dl_server_stop(dl_se);
+	}
+
 	return true;
 }

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f42bef06a9..fb4dcb4551 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2203,7 +2203,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	if (!rt_group_sched_enabled())
 		tg = &root_task_group;
 	p->rt.rt_rq  = tg->rt_rq[cpu];
-	p->rt.parent = tg->rt_se[cpu];
+	p->dl.dl_rq  = &cpu_rq(cpu)->dl;
 #endif /* CONFIG_RT_GROUP_SCHED */
 }

@@ -2750,6 +2750,9 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	unsigned prev_nr = rq->nr_running;

 	rq->nr_running = prev_nr + count;
+	if (rq != cpu_rq(rq->cpu))
+		return;
+
 	if (trace_sched_update_nr_running_tp_enabled()) {
 		call_trace_sched_update_nr_running(rq, count);
 	}
@@ -2763,6 +2766,9 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
+	if (rq != cpu_rq(rq->cpu))
+		return;
+
 	if (trace_sched_update_nr_running_tp_enabled()) {
 		call_trace_sched_update_nr_running(rq, -count);
 	}
--
2.51.0