Commit 3eb6d6ececca ("sched/fair: Refactor CPU utilization functions")
refactored cpu_util_without() and cpu_util(). The refactoring grew
cpu_util() enough that it is no longer inlined, which hurts performance:
when updating a sched_group's statistics, cpu_util_without() and
cpu_util() sit on the hot path.
Inlining cpu_util_without() and cpu_util() has been shown to
significantly improve performance in lmbench, as follows:
Machine: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz

                   before        after     diff
fork+exit        317.0625     303.6667   -4.22%
fork+execve     1482.5000    1407.0000   -5.09%
fork+/bin/sh    2096.0000    2020.3333   -3.61%
Mark cpu_util_without() and cpu_util() __always_inline. While this
grows kernel/sched/fair.o slightly (text grows by 40 bytes here), the
performance gains on this hot path make it an acceptable trade-off.
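For reference, a minimal userspace sketch (not kernel code) of what the
hint does; the kernel's __always_inline in include/linux/compiler_types.h
expands to roughly the macro below, and util_sum() is just a made-up
example function:

#include <stdio.h>

/*
 * Roughly the kernel's definition; the attribute forces GCC/Clang to
 * expand the body at every call site even when the function is too big
 * for the compiler's normal inlining heuristics.  (glibc headers may
 * also define __always_inline, hence the #undef.)
 */
#undef __always_inline
#define __always_inline inline __attribute__((__always_inline__))

/* Made-up example; a plain "static" version may be emitted out of line. */
static __always_inline unsigned long util_sum(const unsigned long *v, int n)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += v[i];
	return sum;
}

int main(void)
{
	unsigned long v[4] = { 1, 2, 3, 4 };

	printf("%lu\n", util_sum(v, 4));	/* prints 10 */
	return 0;
}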
Size comparison before and after the patch:

   text    data   bss    dec    hex  filename
0x1264a  0x1506  0xb0  80896  13c00  kernel/sched/fair.o.before
0x12672  0x14fe  0xb0  80928  13c20  kernel/sched/fair.o.after
Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Signed-off-by: Li Zetao <lizetao1@huawei.com>
---
kernel/sched/fair.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5904405ffc59..677b78fa65b6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7706,7 +7706,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
*
* Return: (Boosted) (estimated) utilization for the specified CPU.
*/
-static unsigned long
+static __always_inline unsigned long
cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
{
struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
@@ -7794,7 +7794,7 @@ unsigned long cpu_util_cfs_boost(int cpu)
* utilization of the specified task, whenever the task is currently
* contributing to the CPU utilization.
*/
-static unsigned long cpu_util_without(int cpu, struct task_struct *p)
+static __always_inline unsigned long cpu_util_without(int cpu, struct task_struct *p)
{
/* Task has no contribution or is new */
if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
--
2.34.1
On Tue, Jul 23, 2024 at 03:36:07PM +0800, Li Zetao wrote:
> Commit 3eb6d6ececca ("sched/fair: Refactor CPU utilization functions")
> refactored cpu_util_without() and cpu_util(). The refactoring grew
> cpu_util() enough that it is no longer inlined, which hurts performance:
> when updating a sched_group's statistics, cpu_util_without() and
> cpu_util() sit on the hot path.
>
> Inlining cpu_util_without() and cpu_util() has been shown to
> significantly improve performance in lmbench, as follows:
>
> Machine: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
> before after diff
> fork+exit 317.0625 303.6667 -4.22%
> fork+execve 1482.5000 1407.0000 -5.09%
> fork+/bin/sh 2096.0000 2020.3333 -3.61%
That's quite significant. Did you look at what exactly was causing this?
> Mark cpu_util_without() and cpu_util() __always_inline. While this
> grows kernel/sched/fair.o slightly (text grows by 40 bytes here), the
> performance gains on this hot path make it an acceptable trade-off.
>
> Size comparison before and after patch:
> text data bss dec hex filename
> 0x1264a 0x1506 0xb0 80896 13c00 kernel/sched/fair.o.before
> 0x12672 0x14fe 0xb0 80928 13c20 kernel/sched/fair.o.after
>
> Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
> Signed-off-by: Li Zetao <lizetao1@huawei.com>
This SoB chain is not valid. Please review the documentation we have
on this.
> ---
> kernel/sched/fair.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 5904405ffc59..677b78fa65b6 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -7706,7 +7706,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
> *
> * Return: (Boosted) (estimated) utilization for the specified CPU.
> */
> -static unsigned long
> +static __always_inline unsigned long
> cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
> {
> struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
> @@ -7794,7 +7794,7 @@ unsigned long cpu_util_cfs_boost(int cpu)
> * utilization of the specified task, whenever the task is currently
> * contributing to the CPU utilization.
> */
> -static unsigned long cpu_util_without(int cpu, struct task_struct *p)
> +static __always_inline unsigned long cpu_util_without(int cpu, struct task_struct *p)
> {
> /* Task has no contribution or is new */
> if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
> --
> 2.34.1
>
Hi,
On 2024/7/24 18:53, Peter Zijlstra wrote:
> On Tue, Jul 23, 2024 at 03:36:07PM +0800, Li Zetao wrote:
>> Commit 3eb6d6ececca ("sched/fair: Refactor CPU utilization functions")
>> refactored cpu_util_without() and cpu_util(). The refactoring grew
>> cpu_util() enough that it is no longer inlined, which hurts performance:
>> when updating a sched_group's statistics, cpu_util_without() and
>> cpu_util() sit on the hot path.
>>
>> Inlining cpu_util_without() and cpu_util() has been shown to
>> significantly improve performance in lmbench, as follows:
>>
>> Machine: Intel(R) Xeon(R) Gold 6248 CPU @ 2.50GHz
>> before after diff
>> fork+exit 317.0625 303.6667 -4.22%
>> fork+execve 1482.5000 1407.0000 -5.09%
>> fork+/bin/sh 2096.0000 2020.3333 -3.61%
>
> That's quite significant. Did you look at what exactly was causing this?
In the fork case, selecting a CPU for the new task goes through the
following hot path:

  sched_balance_find_dst_cpu() ->
    sched_balance_find_dst_group() ->
      update_sg_wakeup_stats() -> cpu_util_without() -> cpu_util()

On a system with many CPUs, computing each sched_group's utilization
calls cpu_util() once per CPU in the group, so the call overhead adds
up; see the sketch below.
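To make the scaling concrete, here is a hypothetical, much-simplified C
sketch of the shape of that loop; the helper and field names follow
kernel/sched/fair.c, but this is not the real implementation and it
omits most of the statistics the real function gathers:

/*
 * Hypothetical, simplified sketch only: the real update_sg_wakeup_stats()
 * in kernel/sched/fair.c computes more statistics and handles more cases.
 */
static void update_sg_wakeup_stats_sketch(struct sched_group *group,
					  struct sg_lb_stats *sgs,
					  struct task_struct *p)
{
	int i;

	/*
	 * One cpu_util_without() -> cpu_util() call pair per CPU in the
	 * group; with hundreds of CPUs this loop dominates, so paying two
	 * extra function calls per iteration is measurable.
	 */
	for_each_cpu(i, sched_group_span(group)) {
		sgs->group_load += cpu_load(cpu_rq(i));
		sgs->group_util += cpu_util_without(i, p);
	}
}

With both functions marked __always_inline, each iteration of this loop
avoids that call overhead, which is what the lmbench fork-path numbers
above reflect.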
>
>> Mark cpu_util_without() and cpu_util() __always_inline. While this
>> grows kernel/sched/fair.o slightly (text grows by 40 bytes here), the
>> performance gains on this hot path make it an acceptable trade-off.
>>
>> Size comparison before and after patch:
>> text data bss dec hex filename
>> 0x1264a 0x1506 0xb0 80896 13c00 kernel/sched/fair.o.before
>> 0x12672 0x14fe 0xb0 80928 13c20 kernel/sched/fair.o.after
>>
>> Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
>> Signed-off-by: Li Zetao <lizetao1@huawei.com>
>
> This SoB chain is not valid. Please review the documentation we have
> on this.
I will fix this in v2.
>
>> ---
>> kernel/sched/fair.c | 4 ++--
>> 1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 5904405ffc59..677b78fa65b6 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -7706,7 +7706,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
>> *
>> * Return: (Boosted) (estimated) utilization for the specified CPU.
>> */
>> -static unsigned long
>> +static __always_inline unsigned long
>> cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
>> {
>> struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
>> @@ -7794,7 +7794,7 @@ unsigned long cpu_util_cfs_boost(int cpu)
>> * utilization of the specified task, whenever the task is currently
>> * contributing to the CPU utilization.
>> */
>> -static unsigned long cpu_util_without(int cpu, struct task_struct *p)
>> +static __always_inline unsigned long cpu_util_without(int cpu, struct task_struct *p)
>> {
>> /* Task has no contribution or is new */
>> if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
>> --
>> 2.34.1
>>
>
Best regards,
Li Zetao