[PATCH v4 5/8] smp: Move cpu_up/down helpers to common code

Mykyta Poturai posted 8 patches 6 days, 15 hours ago
[PATCH v4 5/8] smp: Move cpu_up/down helpers to common code
Posted by Mykyta Poturai 6 days, 15 hours ago
This will reduce code duplication for the upcoming patch adding CPU
hotplug support on Arm64.

The SMT-disable enforcement check is moved into a separate
architecture-specific function.

Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>

v3->v4:
* patch introduced
---
 xen/arch/arm/smp.c             |  6 ++++++
 xen/arch/ppc/stubs.c           |  4 ++++
 xen/arch/riscv/stubs.c         |  5 +++++
 xen/arch/x86/include/asm/smp.h |  3 ---
 xen/arch/x86/smp.c             | 33 +++------------------------------
 xen/common/smp.c               | 32 ++++++++++++++++++++++++++++++++
 xen/include/xen/smp.h          |  4 ++++
 7 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
index b372472188..85815aeda0 100644
--- a/xen/arch/arm/smp.c
+++ b/xen/arch/arm/smp.c
@@ -44,6 +44,12 @@ void smp_send_call_function_mask(const cpumask_t *mask)
     }
 }
 
+/* ARM don't have SMT so we don't need any special logic for CPU disabling  */
+bool arch_smt_cpu_disable(unsigned int cpu)
+{
+    return false;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/ppc/stubs.c b/xen/arch/ppc/stubs.c
index bdaf474c5c..ca97cec51f 100644
--- a/xen/arch/ppc/stubs.c
+++ b/xen/arch/ppc/stubs.c
@@ -101,6 +101,10 @@ void smp_send_call_function_mask(const cpumask_t *mask)
     BUG_ON("unimplemented");
 }
 
+bool arch_smt_cpu_disable(unsigned int cpu)
+{
+    BUG_ON("unimplemented");
+}
 /* irq.c */
 
 void irq_ack_none(struct irq_desc *desc)
diff --git a/xen/arch/riscv/stubs.c b/xen/arch/riscv/stubs.c
index 1a8c86cd8d..60610349cb 100644
--- a/xen/arch/riscv/stubs.c
+++ b/xen/arch/riscv/stubs.c
@@ -80,6 +80,11 @@ void smp_send_call_function_mask(const cpumask_t *mask)
     BUG_ON("unimplemented");
 }
 
+bool arch_smt_cpu_disable(unsigned int cpu)
+{
+    BUG_ON("unimplemented");
+}
+
 /* irq.c */
 
 void irq_ack_none(struct irq_desc *desc)
diff --git a/xen/arch/x86/include/asm/smp.h b/xen/arch/x86/include/asm/smp.h
index 60eb4ac254..b77fc0bc6d 100644
--- a/xen/arch/x86/include/asm/smp.h
+++ b/xen/arch/x86/include/asm/smp.h
@@ -50,9 +50,6 @@ int cpu_add(uint32_t apic_id, uint32_t acpi_id, uint32_t pxm);
 
 void __stop_this_cpu(void);
 
-long cf_check cpu_up_helper(void *data);
-long cf_check cpu_down_helper(void *data);
-
 long cf_check core_parking_helper(void *data);
 bool core_parking_remove(unsigned int cpu);
 uint32_t get_cur_idle_nums(void);
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index 7936294f5f..d64b533cc0 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -418,35 +418,8 @@ void cf_check call_function_interrupt(void)
     smp_call_function_interrupt();
 }
 
-long cf_check cpu_up_helper(void *data)
+bool arch_smt_cpu_disable(unsigned int cpu)
 {
-    unsigned int cpu = (unsigned long)data;
-    int ret = cpu_up(cpu);
-
-    /* Have one more go on EBUSY. */
-    if ( ret == -EBUSY )
-        ret = cpu_up(cpu);
-
-    if ( !ret && !opt_smt &&
-         cpu_data[cpu].compute_unit_id == INVALID_CUID &&
-         cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1 )
-    {
-        ret = cpu_down_helper(data);
-        if ( ret )
-            printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
-        else
-            ret = -EPERM;
-    }
-
-    return ret;
-}
-
-long cf_check cpu_down_helper(void *data)
-{
-    int cpu = (unsigned long)data;
-    int ret = cpu_down(cpu);
-    /* Have one more go on EBUSY. */
-    if ( ret == -EBUSY )
-        ret = cpu_down(cpu);
-    return ret;
+    return !opt_smt && cpu_data[cpu].compute_unit_id == INVALID_CUID &&
+           cpumask_weight(per_cpu(cpu_sibling_mask, cpu)) > 1;
 }
diff --git a/xen/common/smp.c b/xen/common/smp.c
index a011f541f1..114c1da77d 100644
--- a/xen/common/smp.c
+++ b/xen/common/smp.c
@@ -16,6 +16,7 @@
  * GNU General Public License for more details.
  */
 
+#include <xen/cpu.h>
 #include <asm/hardirq.h>
 #include <asm/processor.h>
 #include <xen/spinlock.h>
@@ -104,6 +105,37 @@ void smp_call_function_interrupt(void)
     irq_exit();
 }
 
+long cf_check cpu_up_helper(void *data)
+{
+    unsigned int cpu = (unsigned long)data;
+    int ret = cpu_up(cpu);
+
+    /* Have one more go on EBUSY. */
+    if ( ret == -EBUSY )
+        ret = cpu_up(cpu);
+
+    if ( !ret && arch_smt_cpu_disable(cpu) )
+    {
+        ret = cpu_down_helper(data);
+        if ( ret )
+            printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
+        else
+            ret = -EPERM;
+    }
+
+    return ret;
+}
+
+long cf_check cpu_down_helper(void *data)
+{
+    int cpu = (unsigned long)data;
+    int ret = cpu_down(cpu);
+    /* Have one more go on EBUSY. */
+    if ( ret == -EBUSY )
+        ret = cpu_down(cpu);
+    return ret;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/xen/smp.h b/xen/include/xen/smp.h
index 2ca9ff1bfc..c734033bfb 100644
--- a/xen/include/xen/smp.h
+++ b/xen/include/xen/smp.h
@@ -76,4 +76,8 @@ extern void *stack_base[NR_CPUS];
 void initialize_cpu_data(unsigned int cpu);
 int setup_cpu_root_pgt(unsigned int cpu);
 
+bool arch_smt_cpu_disable(unsigned int cpu);
+long cf_check cpu_up_helper(void *data);
+long cf_check cpu_down_helper(void *data);
+
 #endif /* __XEN_SMP_H__ */
-- 
2.51.2
Re: [PATCH v4 5/8] smp: Move cpu_up/down helpers to common code
Posted by Julien Grall 2 days, 14 hours ago
Hi,

On 12/11/2025 10:51, Mykyta Poturai wrote:
> This will reduce code duplication for the upcoming cpu hotplug support
> on Arm64 patch.
> 
> SMT-disable enforcement check is moved into a separate
> architecture-specific function.
> 
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>
> 
> v3->v4:
> * patch introduced
> ---
>   xen/arch/arm/smp.c             |  6 ++++++
>   xen/arch/ppc/stubs.c           |  4 ++++
>   xen/arch/riscv/stubs.c         |  5 +++++
>   xen/arch/x86/include/asm/smp.h |  3 ---
>   xen/arch/x86/smp.c             | 33 +++------------------------------
>   xen/common/smp.c               | 32 ++++++++++++++++++++++++++++++++
>   xen/include/xen/smp.h          |  4 ++++
>   7 files changed, 54 insertions(+), 33 deletions(-)
> 
> diff --git a/xen/arch/arm/smp.c b/xen/arch/arm/smp.c
> index b372472188..85815aeda0 100644
> --- a/xen/arch/arm/smp.c
> +++ b/xen/arch/arm/smp.c
> @@ -44,6 +44,12 @@ void smp_send_call_function_mask(const cpumask_t *mask)
>       }
>   }
>   
> +/* ARM don't have SMT so we don't need any special logic for CPU disabling  */

Xen doesn't support SMT on Arm. But some of the cores may support SMT.
So I would reword this to:

"We currently don't support SMT"

Cheers,

-- 
Julien Grall
Re: [PATCH v4 5/8] smp: Move cpu_up/down helpers to common code
Posted by Jan Beulich 5 days, 15 hours ago
On 12.11.2025 11:51, Mykyta Poturai wrote:
> This will reduce code duplication for the upcoming cpu hotplug support
> on Arm64 patch.
> 
> SMT-disable enforcement check is moved into a separate
> architecture-specific function.
> 
> Signed-off-by: Mykyta Poturai <mykyta_poturai@epam.com>

Solely from an x86 perspective this looks okay to me, but on Arm you introduce
...

> --- a/xen/common/smp.c
> +++ b/xen/common/smp.c
> @@ -16,6 +16,7 @@
>   * GNU General Public License for more details.
>   */
>  
> +#include <xen/cpu.h>
>  #include <asm/hardirq.h>
>  #include <asm/processor.h>
>  #include <xen/spinlock.h>
> @@ -104,6 +105,37 @@ void smp_call_function_interrupt(void)
>      irq_exit();
>  }
>  
> +long cf_check cpu_up_helper(void *data)
> +{
> +    unsigned int cpu = (unsigned long)data;
> +    int ret = cpu_up(cpu);
> +
> +    /* Have one more go on EBUSY. */
> +    if ( ret == -EBUSY )
> +        ret = cpu_up(cpu);
> +
> +    if ( !ret && arch_smt_cpu_disable(cpu) )
> +    {
> +        ret = cpu_down_helper(data);
> +        if ( ret )
> +            printk("Could not re-offline CPU%u (%d)\n", cpu, ret);
> +        else
> +            ret = -EPERM;
> +    }
> +
> +    return ret;
> +}
> +
> +long cf_check cpu_down_helper(void *data)
> +{
> +    int cpu = (unsigned long)data;
> +    int ret = cpu_down(cpu);
> +    /* Have one more go on EBUSY. */
> +    if ( ret == -EBUSY )
> +        ret = cpu_down(cpu);
> +    return ret;
> +}

...unreachable code, which - for the case when RUNTIME_CPU_CONTROL=n - won't
even be rectified by the next patch.

Jan