From: Daniel Bristot de Oliveira
To: Ingo Molnar, Peter Zijlstra, Juri Lelli, Vincent Guittot
Cc: Dietmar Eggemann, Steven Rostedt, Ben Segall, Mel Gorman,
    Daniel Bristot de Oliveira, Valentin Schneider,
    linux-kernel@vger.kernel.org, Luca Abeni, Tommaso Cucinotta,
    Thomas Gleixner, Joel Fernandes, Vineeth Pillai, Shuah Khan,
    bristot@kernel.org, Phil Auld
Subject: [PATCH v4 1/7] sched: Unify runtime accounting across classes
Date: Thu, 31 Aug 2023 22:28:52 +0200
Message-Id: <093be922c23781bc90c2fde27eaad9ef6fc3051c.1693510979.git.bristot@kernel.org>

From: Peter Zijlstra

All classes use sched_entity::exec_start to track runtime and have
copies of the exact same code around to compute runtime. Collapse
all that.
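As a rough illustration of the duplication being collapsed, below is a small
stand-alone user-space model (an assumption-laden sketch, not the kernel code:
names such as fake_rq, model_update_curr_se() and model_update_curr_common()
are invented for the example). Each class used to open-code the clock read,
the signed delta_exec computation and the bookkeeping; one shared helper now
does it once.

#include <stdio.h>
#include <stdint.h>

/* Model of the fields every class touches via sched_entity. */
struct fake_se {
	uint64_t exec_start;       /* stands in for sched_entity::exec_start */
	uint64_t sum_exec_runtime; /* stands in for sched_entity::sum_exec_runtime */
};

/* Model of a runqueue with a task clock and a current entity. */
struct fake_rq {
	uint64_t clock_task;       /* stands in for rq_clock_task(rq) */
	struct fake_se curr_se;
};

/* What each class used to duplicate: read the clock, compute the signed
 * delta, bail out on non-positive deltas, then advance the accounting. */
static int64_t model_update_curr_se(struct fake_rq *rq, struct fake_se *se)
{
	int64_t delta_exec = (int64_t)(rq->clock_task - se->exec_start);

	if (delta_exec <= 0)
		return delta_exec;

	se->exec_start = rq->clock_task;
	se->sum_exec_runtime += (uint64_t)delta_exec;
	return delta_exec;
}

/* The single entry point the rt/deadline/stop "classes" now share. */
static int64_t model_update_curr_common(struct fake_rq *rq)
{
	return model_update_curr_se(rq, &rq->curr_se);
}

int main(void)
{
	struct fake_rq rq = { .clock_task = 1000, .curr_se = { .exec_start = 400 } };

	/* Two ticks of the (fake) clock, both accounted through one helper. */
	printf("delta = %lld\n", (long long)model_update_curr_common(&rq));
	rq.clock_task += 250;
	printf("delta = %lld\n", (long long)model_update_curr_common(&rq));
	printf("sum_exec_runtime = %llu\n",
	       (unsigned long long)rq.curr_se.sum_exec_runtime);
	return 0;
}

In the patch itself, update_curr_se() additionally feeds schedstats, and
update_curr_common() adds the runtime tracepoint plus group/cgroup time
accounting; the model only keeps the shared exec_start/sum_exec_runtime
bookkeeping.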
Reviewed-by: Phil Auld
Reviewed-by: Valentin Schneider
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Daniel Bristot de Oliveira
Reviewed-by: Steven Rostedt (Google)
---
 include/linux/sched.h    |  2 +-
 kernel/sched/deadline.c  | 15 +++--------
 kernel/sched/fair.c      | 57 ++++++++++++++++++++++++++++++----------
 kernel/sched/rt.c        | 15 +++--------
 kernel/sched/sched.h     | 12 ++-------
 kernel/sched/stop_task.c | 13 +--------
 6 files changed, 53 insertions(+), 61 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 177b3f3676ef..639f6eb9bd4f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -520,7 +520,7 @@ struct sched_statistics {
 	u64			block_max;
 	s64			sum_block_runtime;
 
-	u64			exec_max;
+	s64			exec_max;
 	u64			slice_max;
 
 	u64			nr_migrations_cold;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 58b542bf2893..9a09d9dafd88 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1299,9 +1299,8 @@ static void update_curr_dl(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_dl_entity *dl_se = &curr->dl;
-	u64 delta_exec, scaled_delta_exec;
+	s64 delta_exec, scaled_delta_exec;
 	int cpu = cpu_of(rq);
-	u64 now;
 
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
@@ -1314,21 +1313,13 @@ static void update_curr_dl(struct rq *rq)
 	 * natural solution, but the full ramifications of this
 	 * approach need further study.
 	 */
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0)) {
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0)) {
 		if (unlikely(dl_se->dl_yielded))
 			goto throttle;
 		return;
 	}
 
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	trace_sched_stat_runtime(curr, delta_exec, 0);
-
-	update_current_exec_runtime(curr, now, delta_exec);
-
 	if (dl_entity_is_special(dl_se))
 		return;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 911d0063763c..52c8219623b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1092,23 +1092,17 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Update the current task's runtime statistics.
- */
-static void update_curr(struct cfs_rq *cfs_rq)
+static s64 update_curr_se(struct rq *rq, struct sched_entity *curr)
 {
-	struct sched_entity *curr = cfs_rq->curr;
-	u64 now = rq_clock_task(rq_of(cfs_rq));
-	u64 delta_exec;
-
-	if (unlikely(!curr))
-		return;
+	u64 now = rq_clock_task(rq);
+	s64 delta_exec;
 
 	delta_exec = now - curr->exec_start;
-	if (unlikely((s64)delta_exec <= 0))
-		return;
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
 
 	curr->exec_start = now;
+	curr->sum_exec_runtime += delta_exec;
 
 	if (schedstat_enabled()) {
 		struct sched_statistics *stats;
@@ -1118,8 +1112,43 @@ static void update_curr(struct cfs_rq *cfs_rq)
 			max(delta_exec, stats->exec_max));
 	}
 
-	curr->sum_exec_runtime += delta_exec;
-	schedstat_add(cfs_rq->exec_clock, delta_exec);
+	return delta_exec;
+}
+
+/*
+ * Used by other classes to account runtime.
+ */
+s64 update_curr_common(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	s64 delta_exec;
+
+	delta_exec = update_curr_se(rq, &curr->se);
+	if (unlikely(delta_exec <= 0))
+		return delta_exec;
+
+	trace_sched_stat_runtime(curr, delta_exec, 0);
+
+	account_group_exec_runtime(curr, delta_exec);
+	cgroup_account_cputime(curr, delta_exec);
+
+	return delta_exec;
+}
+
+/*
+ * Update the current task's runtime statistics.
+ */
+static void update_curr(struct cfs_rq *cfs_rq)
+{
+	struct sched_entity *curr = cfs_rq->curr;
+	s64 delta_exec;
+
+	if (unlikely(!curr))
+		return;
+
+	delta_exec = update_curr_se(rq_of(cfs_rq), curr);
+	if (unlikely(delta_exec <= 0))
+		return;
 
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_deadline(cfs_rq, curr);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0597ba0f85ff..e23cc67c9467 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1046,24 +1046,15 @@ static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_rt_entity *rt_se = &curr->rt;
-	u64 delta_exec;
-	u64 now;
+	s64 delta_exec;
 
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec <= 0))
+	delta_exec = update_curr_common(rq);
+	if (unlikely(delta_exec <= 0))
 		return;
 
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	trace_sched_stat_runtime(curr, delta_exec, 0);
-
-	update_current_exec_runtime(curr, now, delta_exec);
-
 	if (!rt_bandwidth_enabled())
 		return;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 04846272409c..1def5b7fa1df 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2228,6 +2228,8 @@ struct affinity_context {
 	unsigned int flags;
 };
 
+extern s64 update_curr_common(struct rq *rq);
+
 struct sched_class {
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -3280,16 +3282,6 @@ extern int sched_dynamic_mode(const char *str);
 extern void sched_dynamic_update(int mode);
 #endif
 
-static inline void update_current_exec_runtime(struct task_struct *curr,
-						u64 now, u64 delta_exec)
-{
-	curr->se.sum_exec_runtime += delta_exec;
-	account_group_exec_runtime(curr, delta_exec);
-
-	curr->se.exec_start = now;
-	cgroup_account_cputime(curr, delta_exec);
-}
-
 #ifdef CONFIG_SCHED_MM_CID
 
 #define SCHED_MM_CID_PERIOD_NS	(100ULL * 1000000)	/* 100ms */
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 85590599b4d6..7595494ceb6d 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -70,18 +70,7 @@ static void yield_task_stop(struct rq *rq)
 
 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
-	struct task_struct *curr = rq->curr;
-	u64 now, delta_exec;
-
-	now = rq_clock_task(rq);
-	delta_exec = now - curr->se.exec_start;
-	if (unlikely((s64)delta_exec < 0))
-		delta_exec = 0;
-
-	schedstat_set(curr->stats.exec_max,
-		      max(curr->stats.exec_max, delta_exec));
-
-	update_current_exec_runtime(curr, now, delta_exec);
+	update_curr_common(rq);
 }
 
 /*
-- 
2.40.1