[RFC PATCH v5 5/9] sched/fair: Hoist idle_stamp up from idle_balance

Chen Jinghuang posted 9 patches 2 weeks, 3 days ago
[RFC PATCH v5 5/9] sched/fair: Hoist idle_stamp up from idle_balance
Posted by Chen Jinghuang 2 weeks, 3 days ago
From: Steve Sistare <steven.sistare@oracle.com>

Move the update of idle_stamp from sched_balance_newidle() (formerly
idle_balance) to its call site in pick_next_task_fair, to prepare for a
future patch that adds work to pick_next_task_fair which must be
included in the idle_stamp interval. No functional change.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Chen Jinghuang <chenjinghuang2@huawei.com>
---
 kernel/sched/fair.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 92c3bcff5b6b..742462d41118 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5078,6 +5078,16 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 }
 
 #ifdef CONFIG_SMP
+static inline void rq_idle_stamp_update(struct rq *rq)
+{
+	rq->idle_stamp = rq_clock(rq);
+}
+
+static inline void rq_idle_stamp_clear(struct rq *rq)
+{
+	rq->idle_stamp = 0;
+}
+
 static void overload_clear(struct rq *rq)
 {
 	struct sparsemask *overload_cpus;
@@ -5100,6 +5110,8 @@ static void overload_set(struct rq *rq)
 	rcu_read_unlock();
 }
 #else /* CONFIG_SMP */
+static inline void rq_idle_stamp_update(struct rq *rq) {}
+static inline void rq_idle_stamp_clear(struct rq *rq) {}
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
 #endif
@@ -9011,8 +9023,17 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 
 idle:
 	if (rf) {
+		/*
+		 * We must set idle_stamp _before_ calling sched_balance_newidle(),
+		 * such that we measure its duration as idle time.
+		 */
+		rq_idle_stamp_update(rq);
+
 		new_tasks = sched_balance_newidle(rq, rf);
 
+		if (new_tasks)
+			rq_idle_stamp_clear(rq);
+
 		/*
 		 * Because sched_balance_newidle() releases (and re-acquires)
 		 * rq->lock, it is possible for any higher priority task to
@@ -12911,13 +12932,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	if (this_rq->ttwu_pending)
 		return 0;
 
-	/*
-	 * We must set idle_stamp _before_ calling sched_balance_rq()
-	 * for CPU_NEWLY_IDLE, such that we measure the this duration
-	 * as idle time.
-	 */
-	this_rq->idle_stamp = rq_clock(this_rq);
-
 	/*
 	 * Do not pull tasks towards !active CPUs...
 	 */
@@ -13026,9 +13040,7 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 	if (time_after(this_rq->next_balance, next_balance))
 		this_rq->next_balance = next_balance;
 
-	if (pulled_task)
-		this_rq->idle_stamp = 0;
-	else
+	if (!pulled_task)
 		nohz_newidle_balance(this_rq);
 
 	rq_repin_lock(this_rq, rf);
-- 
2.34.1