[PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations

Mykyta Poturai posted 5 patches 1 week, 4 days ago
[PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations
Posted by Mykyta Poturai 1 week, 4 days ago
Move IRQs from the dying CPU to online ones when a CPU is being
offlined. When onlining, rebalance all IRQs in a round-robin fashion.
Guest-bound IRQs are already handled by the scheduler in the process of
moving vCPUs to active pCPUs, so we only need to handle IRQs used by Xen
itself.

Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
---
v5->v6:
* don't do any balancing on boot
* only do balancing when cpu hotplug is enabled

v4->v5:
* handle CPU onlining as well
* more comments
* fix crash when ESPI is disabled
* don't assume CPU 0 is a boot CPU
* use unsigned int for irq number
* remove assumption that all IRQs are bound to CPU 0 by default from the
  commit message

v3->v4:
* patch introduced
---
 xen/arch/arm/include/asm/irq.h |  4 +++
 xen/arch/arm/irq.c             | 60 ++++++++++++++++++++++++++++++++++
 xen/arch/arm/smpboot.c         |  8 +++++
 3 files changed, 72 insertions(+)

diff --git a/xen/arch/arm/include/asm/irq.h b/xen/arch/arm/include/asm/irq.h
index 09788dbfeb..a3897ec62d 100644
--- a/xen/arch/arm/include/asm/irq.h
+++ b/xen/arch/arm/include/asm/irq.h
@@ -126,6 +126,10 @@ bool irq_type_set_by_domain(const struct domain *d);
 void irq_end_none(struct irq_desc *irq);
 #define irq_end_none irq_end_none
 
+#ifdef CONFIG_CPU_HOTPLUG
+void rebalance_irqs(unsigned int from, bool up);
+#endif
+
 #endif /* _ASM_HW_IRQ_H */
 /*
  * Local variables:
diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
index 7204bc2b68..d428d3118b 100644
--- a/xen/arch/arm/irq.c
+++ b/xen/arch/arm/irq.c
@@ -158,6 +158,60 @@ static int init_local_irq_data(unsigned int cpu)
     return 0;
 }
 
+#ifdef CONFIG_CPU_HOTPLUG
+static int cpu_next;
+
+static void balance_irq(int irq, unsigned int from, bool up)
+{
+    struct irq_desc *desc = irq_to_desc(irq);
+    unsigned long flags;
+
+    ASSERT(!cpumask_empty(&cpu_online_map));
+
+    spin_lock_irqsave(&desc->lock, flags);
+    if ( likely(!desc->action) )
+        goto out;
+
+    if ( likely(test_bit(_IRQ_GUEST, &desc->status) ||
+                test_bit(_IRQ_MOVE_PENDING, &desc->status)) )
+        goto out;
+
+    /*
+     * Setting affinity to a mask of multiple CPUs causes the GIC drivers to
+     * select one CPU from that mask. If the dying CPU was included in the IRQ's
+     * affinity mask, we cannot determine exactly which CPU the interrupt is
+     * currently routed to, as GIC drivers lack a concrete get_affinity API. So
+     * to be safe we must reroute it to a new, definitely online, CPU. In the
+     * case of CPU going down, we move only the interrupt that could reside on
+     * it. Otherwise, we rearrange all interrupts in a round-robin fashion.
+     */
+    if ( !up && !cpumask_test_cpu(from, desc->affinity) )
+        goto out;
+
+    cpu_next = cpumask_cycle(cpu_next, &cpu_online_map);
+    irq_set_affinity(desc, cpumask_of(cpu_next));
+
+out:
+    spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void rebalance_irqs(unsigned int from, bool up)
+{
+    int irq;
+
+    if ( cpumask_empty(&cpu_online_map) )
+        return;
+
+    for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
+        balance_irq(irq, from, up);
+
+#ifdef CONFIG_GICV3_ESPI
+    for ( irq = ESPI_BASE_INTID; irq < ESPI_MAX_INTID; irq++ )
+        balance_irq(irq, from, up);
+#endif
+}
+#endif /* CONFIG_CPU_HOTPLUG */
+
 static int cpu_callback(struct notifier_block *nfb, unsigned long action,
                         void *hcpu)
 {
@@ -172,6 +226,12 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
             printk(XENLOG_ERR "Unable to allocate local IRQ for CPU%u\n",
                    cpu);
         break;
+    case CPU_ONLINE:
+#ifdef CONFIG_CPU_HOTPLUG
+        if ( system_state >= SYS_STATE_active )
+            rebalance_irqs(cpu, true);
+#endif
+        break;
     }
 
     return notifier_from_errno(rc);
diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
index 7f3cfa812e..f17e88e678 100644
--- a/xen/arch/arm/smpboot.c
+++ b/xen/arch/arm/smpboot.c
@@ -425,6 +425,14 @@ void __cpu_disable(void)
 
     smp_mb();
 
+    /*
+     * Now that the interrupts are cleared and the CPU marked as offline,
+     * move interrupts out of it
+     */
+#ifdef CONFIG_CPU_HOTPLUG
+    rebalance_irqs(cpu, false);
+#endif
+
     /* Return to caller; eventually the IPI mechanism will unwind and the 
      * scheduler will drop to the idle loop, which will call stop_cpu(). */
 }
-- 
2.51.2
Re: [PATCH v6 2/5] arm/irq: Migrate IRQs during CPU up/down operations
Posted by Bertrand Marquis 5 days, 11 hours ago
Hi Mykyta,

> On 12 Mar 2026, at 10:39, Mykyta Poturai <Mykyta_Poturai@epam.com> wrote:
> 
> Move IRQs from dying CPU to the online ones when a CPU is getting
> offlined. When onlining, rebalance all IRQs in a round-robin fashion.
> Guest-bound IRQs are already handled by scheduler in the process of
> moving vCPUs to active pCPUs, so we only need to handle IRQs used by Xen
> itself.
> 
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
> ---
> v5->v6:
> * don't do any balancing on boot
> * only do balancing when cpu hotplug is enabled
> 
> v4->v5:
> * handle CPU onlining as well
> * more comments
> * fix crash when ESPI is disabled
> * don't assume CPU 0 is a boot CPU
> * use insigned int for irq number
> * remove assumption that all irqs a bound to CPU 0 by default from the
>  commit message
> 
> v3->v4:
> * patch introduced
> ---
> xen/arch/arm/include/asm/irq.h |  4 +++
> xen/arch/arm/irq.c             | 60 ++++++++++++++++++++++++++++++++++
> xen/arch/arm/smpboot.c         |  8 +++++
> 3 files changed, 72 insertions(+)
> 
> diff --git a/xen/arch/arm/include/asm/irq.h b/xen/arch/arm/include/asm/irq.h
> index 09788dbfeb..a3897ec62d 100644
> --- a/xen/arch/arm/include/asm/irq.h
> +++ b/xen/arch/arm/include/asm/irq.h
> @@ -126,6 +126,10 @@ bool irq_type_set_by_domain(const struct domain *d);
> void irq_end_none(struct irq_desc *irq);
> #define irq_end_none irq_end_none
> 
> +#ifdef CONFIG_CPU_HOTPLUG
> +void rebalance_irqs(unsigned int from, bool up);
> +#endif

Could you make here something like:
#else
static inline void rebalance_irqs(unsigned int from, bool up) {}
#endif

so that ...

> +
> #endif /* _ASM_HW_IRQ_H */
> /*
>  * Local variables:
> diff --git a/xen/arch/arm/irq.c b/xen/arch/arm/irq.c
> index 7204bc2b68..d428d3118b 100644
> --- a/xen/arch/arm/irq.c
> +++ b/xen/arch/arm/irq.c
> @@ -158,6 +158,60 @@ static int init_local_irq_data(unsigned int cpu)
>     return 0;
> }
> 
> +#ifdef CONFIG_CPU_HOTPLUG
> +static int cpu_next;
> +
> +static void balance_irq(int irq, unsigned int from, bool up)
> +{
> +    struct irq_desc *desc = irq_to_desc(irq);
> +    unsigned long flags;
> +
> +    ASSERT(!cpumask_empty(&cpu_online_map));
> +
> +    spin_lock_irqsave(&desc->lock, flags);
> +    if ( likely(!desc->action) )
> +        goto out;
> +
> +    if ( likely(test_bit(_IRQ_GUEST, &desc->status) ||
> +                test_bit(_IRQ_MOVE_PENDING, &desc->status)) )
> +        goto out;
> +
> +    /*
> +     * Setting affinity to a mask of multiple CPUs causes the GIC drivers to
> +     * select one CPU from that mask. If the dying CPU was included in the IRQ's
> +     * affinity mask, we cannot determine exactly which CPU the interrupt is
> +     * currently routed to, as GIC drivers lack a concrete get_affinity API. So
> +     * to be safe we must reroute it to a new, definitely online, CPU. In the
> +     * case of CPU going down, we move only the interrupt that could reside on
> +     * it. Otherwise, we rearrange all interrupts in a round-robin fashion.
> +     */
> +    if ( !up && !cpumask_test_cpu(from, desc->affinity) )
> +        goto out;
> +
> +    cpu_next = cpumask_cycle(cpu_next, &cpu_online_map);
> +    irq_set_affinity(desc, cpumask_of(cpu_next));
> +
> +out:
> +    spin_unlock_irqrestore(&desc->lock, flags);
> +}
> +
> +void rebalance_irqs(unsigned int from, bool up)
> +{
> +    int irq;
> +
> +    if ( cpumask_empty(&cpu_online_map) )
> +        return;
> +
> +    for ( irq = NR_LOCAL_IRQS; irq < NR_IRQS; irq++ )
> +        balance_irq(irq, from, up);
> +
> +#ifdef CONFIG_GICV3_ESPI
> +    for ( irq = ESPI_BASE_INTID; irq < ESPI_MAX_INTID; irq++ )
> +        balance_irq(irq, from, up);
> +#endif
> +}
> +#endif /* CONFIG_CPU_HOTPLUG */
> +
> static int cpu_callback(struct notifier_block *nfb, unsigned long action,
>                         void *hcpu)
> {
> @@ -172,6 +226,12 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
>             printk(XENLOG_ERR "Unable to allocate local IRQ for CPU%u\n",
>                    cpu);
>         break;
> +    case CPU_ONLINE:
> +#ifdef CONFIG_CPU_HOTPLUG
> +        if ( system_state >= SYS_STATE_active )
> +            rebalance_irqs(cpu, true);
> +#endif
> +        break;

This ifdef could be switched to if IS_ENABLED

>     }
> 
>     return notifier_from_errno(rc);
> diff --git a/xen/arch/arm/smpboot.c b/xen/arch/arm/smpboot.c
> index 7f3cfa812e..f17e88e678 100644
> --- a/xen/arch/arm/smpboot.c
> +++ b/xen/arch/arm/smpboot.c
> @@ -425,6 +425,14 @@ void __cpu_disable(void)
> 
>     smp_mb();
> 
> +    /*
> +     * Now that the interrupts are cleared and the CPU marked as offline,
> +     * move interrupts out of it
> +     */
> +#ifdef CONFIG_CPU_HOTPLUG
> +    rebalance_irqs(cpu, false);
> +#endif

and this one too.

Doing it without the static inline will end up in a build error, and I think
it is clearer to have IS_ENABLED here so that it is obvious from the code
that nothing is done if the config is not enabled.

But happy to remove the IS_ENABLED part if others think differently.

Cheers
Bertrand

> +
>     /* Return to caller; eventually the IPI mechanism will unwind and the 
>      * scheduler will drop to the idle loop, which will call stop_cpu(). */
> }
> -- 
> 2.51.2