[PATCH 1/2] powerpc: Use cpumask_next_wrap instead

Posted by Shrikanth Hegde 6 days, 10 hours ago
cpu = cpumask_next(cpu, mask)
if (cpu >= nr_cpu_ids)
    cpu = cpumask_first(mask)

The above block is identical to:
cpu = cpumask_next_wrap(cpu, mask)

Replace it. No change in functionality or performance,
just slightly simpler code.
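
For reference, a minimal sketch of the equivalence, relying only on the
documented behaviour of cpumask_next() and cpumask_first() from
<linux/cpumask.h>. The helper name below is purely illustrative and is
not the in-tree implementation of cpumask_next_wrap():

/*
 * Illustrative only: the open-coded pattern this patch removes,
 * expressed as a helper. cpumask_next_wrap(cpu, mask) is expected to
 * return the same CPU: the next set bit after 'cpu', wrapping back to
 * the first set bit once the end of the mask is passed.
 */
static inline unsigned int example_next_wrap(int cpu, const struct cpumask *mask)
{
	unsigned int next = cpumask_next(cpu, mask);

	if (next >= nr_cpu_ids)
		next = cpumask_first(mask);
	return next;
}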

Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
---
 arch/powerpc/kernel/irq.c             | 5 +----
 arch/powerpc/mm/book3s64/hash_utils.c | 4 +---
 arch/powerpc/sysdev/xive/common.c     | 5 +----
 3 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index a0e8b998c9b5..f69de08ad347 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -370,10 +370,7 @@ int irq_choose_cpu(const struct cpumask *mask)
 do_round_robin:
 		raw_spin_lock_irqsave(&irq_rover_lock, flags);
 
-		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
-		if (irq_rover >= nr_cpu_ids)
-			irq_rover = cpumask_first(cpu_online_mask);
-
+		irq_rover = cpumask_next_wrap(irq_rover, cpu_online_mask);
 		cpuid = irq_rover;
 
 		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9dc5889d6ecb..e4fcf929cb33 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1299,9 +1299,7 @@ static void stress_hpt_timer_fn(struct timer_list *timer)
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
 		tlbiel_all();
 
-	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
-	if (next_cpu >= nr_cpu_ids)
-		next_cpu = cpumask_first(cpu_online_mask);
+	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);
 	stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10);
 	add_timer_on(&stress_hpt_timer, next_cpu);
 }
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index e1a4f8a97393..b6446abe29a6 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -577,10 +577,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 		 */
 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
 			return cpu;
-		cpu = cpumask_next(cpu, mask);
-		/* Wrap around */
-		if (cpu >= nr_cpu_ids)
-			cpu = cpumask_first(mask);
+		cpu = cpumask_next_wrap(cpu, mask);
 	} while (cpu != first);
 
 	return -1;
-- 
2.47.3
Re: [PATCH 1/2] powerpc: Use cpumask_next_wrap instead
Posted by Yury Norov 4 days, 20 hours ago
On Fri, Mar 27, 2026 at 12:01:28PM +0530, Shrikanth Hegde wrote:
> cpu = cpumask_next(cpu, mask)
> if (cpu >= nr_cpu_ids)
>     cpu = cpumask_first(mask)
> 
> The above block is identical to:
> cpu = cpumask_next_wrap(cpu, mask)
> 
> Replace it. No change in functionality or performance,
> just slightly simpler code.
> 
> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
> ---
>  arch/powerpc/kernel/irq.c             | 5 +----
>  arch/powerpc/mm/book3s64/hash_utils.c | 4 +---
>  arch/powerpc/sysdev/xive/common.c     | 5 +----
>  3 files changed, 3 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
> index a0e8b998c9b5..f69de08ad347 100644
> --- a/arch/powerpc/kernel/irq.c
> +++ b/arch/powerpc/kernel/irq.c
> @@ -370,10 +370,7 @@ int irq_choose_cpu(const struct cpumask *mask)
>  do_round_robin:
>  		raw_spin_lock_irqsave(&irq_rover_lock, flags);
>  
> -		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
> -		if (irq_rover >= nr_cpu_ids)
> -			irq_rover = cpumask_first(cpu_online_mask);
> -
> +		irq_rover = cpumask_next_wrap(irq_rover, cpu_online_mask);
>  		cpuid = irq_rover;
>  
>  		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
> index 9dc5889d6ecb..e4fcf929cb33 100644
> --- a/arch/powerpc/mm/book3s64/hash_utils.c
> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
> @@ -1299,9 +1299,7 @@ static void stress_hpt_timer_fn(struct timer_list *timer)
>  	if (!firmware_has_feature(FW_FEATURE_LPAR))
>  		tlbiel_all();
>  
> -	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
> -	if (next_cpu >= nr_cpu_ids)
> -		next_cpu = cpumask_first(cpu_online_mask);
> +	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);
>  	stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10);
>  	add_timer_on(&stress_hpt_timer, next_cpu);
>  }
> diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
> index e1a4f8a97393..b6446abe29a6 100644
> --- a/arch/powerpc/sysdev/xive/common.c
> +++ b/arch/powerpc/sysdev/xive/common.c
> @@ -577,10 +577,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
>  		 */
>  		if (cpu_online(cpu) && xive_try_pick_target(cpu))
>  			return cpu;
> -		cpu = cpumask_next(cpu, mask);
> -		/* Wrap around */
> -		if (cpu >= nr_cpu_ids)
> -			cpu = cpumask_first(mask);
> +		cpu = cpumask_next_wrap(cpu, mask);

Again, for all except this:

Reviewed-by: Yury Norov <yury.norov@gmail.com>


Can you move my series for xive_find_target_in_mask() together with
your patches?

>  	} while (cpu != first);
>  
>  	return -1;
> -- 
> 2.47.3
Re: [PATCH 1/2] powerpc: Use cpumask_next_wrap instead
Posted by Shrikanth Hegde 4 days, 7 hours ago
Hi Yury.

On 3/29/26 2:14 AM, Yury Norov wrote:
> On Fri, Mar 27, 2026 at 12:01:28PM +0530, Shrikanth Hegde wrote:
>> cpu = cpumask_next(cpu, mask)
>> if (cpu >= nr_cpu_ids)
>>      cpu = cpumask_first(mask)
>>
>> The above block is identical to:
>> cpu = cpumask_next_wrap(cpu, mask)
>>
>> Replace it. No change in functionality or performance,
>> just slightly simpler code.
>>
>> Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com>
>> ---
>>   arch/powerpc/kernel/irq.c             | 5 +----
>>   arch/powerpc/mm/book3s64/hash_utils.c | 4 +---
>>   arch/powerpc/sysdev/xive/common.c     | 5 +----
>>   3 files changed, 3 insertions(+), 11 deletions(-)
>>
>> diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
>> index a0e8b998c9b5..f69de08ad347 100644
>> --- a/arch/powerpc/kernel/irq.c
>> +++ b/arch/powerpc/kernel/irq.c
>> @@ -370,10 +370,7 @@ int irq_choose_cpu(const struct cpumask *mask)
>>   do_round_robin:
>>   		raw_spin_lock_irqsave(&irq_rover_lock, flags);
>>   
>> -		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
>> -		if (irq_rover >= nr_cpu_ids)
>> -			irq_rover = cpumask_first(cpu_online_mask);
>> -
>> +		irq_rover = cpumask_next_wrap(irq_rover, cpu_online_mask);
>>   		cpuid = irq_rover;
>>   
>>   		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
>> diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
>> index 9dc5889d6ecb..e4fcf929cb33 100644
>> --- a/arch/powerpc/mm/book3s64/hash_utils.c
>> +++ b/arch/powerpc/mm/book3s64/hash_utils.c
>> @@ -1299,9 +1299,7 @@ static void stress_hpt_timer_fn(struct timer_list *timer)
>>   	if (!firmware_has_feature(FW_FEATURE_LPAR))
>>   		tlbiel_all();
>>   
>> -	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
>> -	if (next_cpu >= nr_cpu_ids)
>> -		next_cpu = cpumask_first(cpu_online_mask);
>> +	next_cpu = cpumask_next_wrap(raw_smp_processor_id(), cpu_online_mask);
>>   	stress_hpt_timer.expires = jiffies + msecs_to_jiffies(10);
>>   	add_timer_on(&stress_hpt_timer, next_cpu);
>>   }
>> diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
>> index e1a4f8a97393..b6446abe29a6 100644
>> --- a/arch/powerpc/sysdev/xive/common.c
>> +++ b/arch/powerpc/sysdev/xive/common.c
>> @@ -577,10 +577,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
>>   		 */
>>   		if (cpu_online(cpu) && xive_try_pick_target(cpu))
>>   			return cpu;
>> -		cpu = cpumask_next(cpu, mask);
>> -		/* Wrap around */
>> -		if (cpu >= nr_cpu_ids)
>> -			cpu = cpumask_first(mask);
>> +		cpu = cpumask_next_wrap(cpu, mask);
> 
> Again, for all except this:
> 
> Reviewed-by: Yury Norov <yury.norov@gmail.com>
> 

Thanks for the review.

> 
> Can you move my series for xive_find_target_in_mask() together with
> your patches?

Sure.
I can add your patch to this series and send it.

PS: Sorry, I hadn't seen your series.