Replace the open-coded for_each_cpu(cpu, cpu_online_mask) loop with the
equivalent but more readable for_each_online_cpu(cpu) macro.
Signed-off-by: Fushuai Wang <wangfushuai@baidu.com>
---
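For reference, for_each_online_cpu() is a thin wrapper around the open-coded
form. A minimal sketch of the definition, assuming the shape it has in
include/linux/cpumask.h (the exact wrapper may differ between kernel
versions):

    /* sketch of include/linux/cpumask.h; exact form varies by version */
    #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)

Both spellings therefore iterate the same cpu_online_mask, so the change is
purely cosmetic.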
arch/riscv/kernel/unaligned_access_speed.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index ae2068425fbc..5e11b1bd9b2a 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -150,7 +150,7 @@ static void __init check_unaligned_access_speed_all_cpus(void)
* Allocate separate buffers for each CPU so there's no fighting over
* cache lines.
*/
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
if (!bufs[cpu]) {
pr_warn("Allocation failure, not measuring misaligned performance\n");
@@ -165,7 +165,7 @@ static void __init check_unaligned_access_speed_all_cpus(void)
smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);

out:
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
if (bufs[cpu])
__free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
}
--
2.36.1
On Sun, Aug 10, 2025 at 11:34 PM Fushuai Wang <wangfushuai@baidu.com> wrote:
>
> Replace the open-coded for_each_cpu(cpu, cpu_online_mask) loop with the
> equivalent but more readable for_each_online_cpu(cpu) macro.
>
> Signed-off-by: Fushuai Wang <wangfushuai@baidu.com>
> ---
> arch/riscv/kernel/unaligned_access_speed.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
> index ae2068425fbc..5e11b1bd9b2a 100644
> --- a/arch/riscv/kernel/unaligned_access_speed.c
> +++ b/arch/riscv/kernel/unaligned_access_speed.c
> @@ -150,7 +150,7 @@ static void __init check_unaligned_access_speed_all_cpus(void)
> * Allocate separate buffers for each CPU so there's no fighting over
> * cache lines.
> */
> - for_each_cpu(cpu, cpu_online_mask) {
> + for_each_online_cpu(cpu) {
> bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
> if (!bufs[cpu]) {
> pr_warn("Allocation failure, not measuring misaligned performance\n");
> @@ -165,7 +165,7 @@ static void __init check_unaligned_access_speed_all_cpus(void)
> smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
>
> out:
> - for_each_cpu(cpu, cpu_online_mask) {
> + for_each_online_cpu(cpu) {
Good find.
Reviewed-by: Jesse Taube <jesse@rivosinc.com>
> if (bufs[cpu])
> __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
> }
> --
> 2.36.1
>