Use the existing helper function cpu_util_irq() instead of referencing
avg_irq directly.
It was noted that avg_irq could be updated by a different CPU than the one
which is trying to access it. avg_irq is updated with WRITE_ONCE(). Use
READ_ONCE() to access it in order to avoid any compiler optimizations.
Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
---
kernel/sched/fair.c | 4 +---
kernel/sched/sched.h | 2 +-
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1aeca3f943a8..02631060ca7e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9221,10 +9221,8 @@ static inline bool others_have_blocked(struct rq *rq)
if (thermal_load_avg(rq))
return true;
-#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
- if (READ_ONCE(rq->avg_irq.util_avg))
+ if (cpu_util_irq(rq))
return true;
-#endif
return false;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e58a54bda77d..edc20c5cc7ce 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3125,7 +3125,7 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
- return rq->avg_irq.util_avg;
+ return READ_ONCE(rq->avg_irq.util_avg);
}
static inline
--
2.39.3
On Mon, 1 Jan 2024 at 16:47, Shrikanth Hegde <sshegde@linux.vnet.ibm.com> wrote:
>
> Use existing helper function cpu_util_irq instead of referencing it
> directly.
>
> It was noted that avg_irq could be updated by different CPU than the one
> which is trying to access it. avg_irq is updated with WRITE_ONCE. Use
> READ_ONCE to access it in order to avoid any compiler optimizations.
>
> Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
> ---
> kernel/sched/fair.c | 4 +---
> kernel/sched/sched.h | 2 +-
> 2 files changed, 2 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1aeca3f943a8..02631060ca7e 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9221,10 +9221,8 @@ static inline bool others_have_blocked(struct rq *rq)
> if (thermal_load_avg(rq))
> return true;
>
> -#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> - if (READ_ONCE(rq->avg_irq.util_avg))
> + if (cpu_util_irq(rq))
> return true;
> -#endif
>
> return false;
> }
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index e58a54bda77d..edc20c5cc7ce 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -3125,7 +3125,7 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
> #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
> static inline unsigned long cpu_util_irq(struct rq *rq)
> {
> - return rq->avg_irq.util_avg;
> + return READ_ONCE(rq->avg_irq.util_avg);
> }
>
> static inline
> --
> 2.39.3
>
The following commit has been merged into the sched/core branch of tip:
Commit-ID: a6965b31888501f889261a6783f0de6afff84f8d
Gitweb: https://git.kernel.org/tip/a6965b31888501f889261a6783f0de6afff84f8d
Author: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
AuthorDate: Mon, 01 Jan 2024 21:16:24 +05:30
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 28 Feb 2024 15:11:15 +01:00
sched/fair: Add READ_ONCE() and use existing helper function to access ->avg_irq
Use existing helper function cpu_util_irq() instead of open-coding
access to ->avg_irq.
During review it was noted that ->avg_irq could be updated by a
different CPU than the one which is trying to access it.
->avg_irq is updated with WRITE_ONCE(), use READ_ONCE to access it
in order to avoid any compiler optimizations.
Signed-off-by: Shrikanth Hegde <sshegde@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240101154624.100981-3-sshegde@linux.vnet.ibm.com
---
kernel/sched/fair.c | 4 +---
kernel/sched/sched.h | 2 +-
2 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 127e727..ba36339 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9246,10 +9246,8 @@ static inline bool others_have_blocked(struct rq *rq)
if (thermal_load_avg(rq))
return true;
-#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
- if (READ_ONCE(rq->avg_irq.util_avg))
+ if (cpu_util_irq(rq))
return true;
-#endif
return false;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 001fe04..d224267 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3136,7 +3136,7 @@ static inline bool uclamp_rq_is_idle(struct rq *rq)
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
static inline unsigned long cpu_util_irq(struct rq *rq)
{
- return rq->avg_irq.util_avg;
+ return READ_ONCE(rq->avg_irq.util_avg);
}
static inline
© 2016 - 2025 Red Hat, Inc.