[PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs

Lorenzo Pieralisi posted 26 patches 9 months ago
There is a newer version of this series
[PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs
Posted by Lorenzo Pieralisi 9 months ago
From: Marc Zyngier <maz@kernel.org>

The arm64 arch has relied so far on GIC architectural software
generated interrupt (SGIs) to handle IPIs. Those are per-cpu
software generated interrupts.

arm64 architecture code that allocates the IPIs virtual IRQs and
IRQ descriptors was written accordingly.

On GICv5 systems, IPIs are implemented using LPIs that are not
per-cpu interrupts - they are just normal routable IRQs.

Add arch code to set up IPIs on systems where they are handled
using normal routable IRQs.

For those systems, force the IRQ affinity (and make it immutable)
to the CPU a given IRQ was assigned to.

Signed-off-by: Marc Zyngier <maz@kernel.org>
[timothy.hayes@arm.com: fixed ipi/irq conversion, irq flags]
Signed-off-by: Timothy Hayes <timothy.hayes@arm.com>
[lpieralisi: changed affinity set-up, log]
Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/smp.h |   7 ++-
 arch/arm64/kernel/smp.c      | 139 ++++++++++++++++++++++++++++++++-----------
 2 files changed, 111 insertions(+), 35 deletions(-)

diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2510eec026f7e3d6f0ecf1197c3a81b183ddd216..d6fd6efb66a673ae33825971e4aa07e791c02ee5 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -53,7 +53,12 @@ extern void smp_init_cpus(void);
 /*
  * Register IPI interrupts with the arch SMP code
  */
-extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
+extern void set_smp_ipi_range_percpu(int ipi_base, int nr_ipi, int ncpus);
+
+static inline void set_smp_ipi_range(int ipi_base, int n)
+{
+	set_smp_ipi_range_percpu(ipi_base, n, 0);
+}
 
 /*
  * Called from the secondary holding pen, this is the secondary CPU entry point.
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3b3f6b56e733039cad7ff5b8995db16a68f3c762..3f3712e47c94c62836fb89cd4bfb3595fbb41557 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -83,7 +83,26 @@ enum ipi_msg_type {
 
 static int ipi_irq_base __ro_after_init;
 static int nr_ipi __ro_after_init = NR_IPI;
-static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
+
+struct ipi_descs {
+	struct irq_desc *descs[MAX_IPI];
+};
+
+static DEFINE_PER_CPU(struct ipi_descs, pcpu_ipi_desc);
+
+#define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
+
+static bool percpu_ipi_descs __ro_after_init;
+
+static int ipi_to_irq(int ipi, int cpu)
+{
+	return ipi_irq_base + (cpu * nr_ipi) + ipi;
+}
+
+static int irq_to_ipi(int irq)
+{
+	return (irq - ipi_irq_base) % nr_ipi;
+}
 
 static bool crash_stop;
 
@@ -844,7 +863,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
+			seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
 		seq_printf(p, "      %s\n", ipi_types[i]);
 	}
 
@@ -919,7 +938,13 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
 
 static void arm64_backtrace_ipi(cpumask_t *mask)
 {
-	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+	unsigned int cpu;
+
+	if (!percpu_ipi_descs)
+		__ipi_send_mask(get_ipi_desc(0, IPI_CPU_BACKTRACE), mask);
+	else
+		for_each_cpu(cpu, mask)
+			__ipi_send_single(get_ipi_desc(cpu, IPI_CPU_BACKTRACE), cpu);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
@@ -944,7 +969,7 @@ void kgdb_roundup_cpus(void)
 		if (cpu == this_cpu)
 			continue;
 
-		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+		__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
 	}
 }
 #endif
@@ -1013,14 +1038,21 @@ static void do_handle_IPI(int ipinr)
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
-	do_handle_IPI(irq - ipi_irq_base);
+	do_handle_IPI(irq_to_ipi(irq));
 	return IRQ_HANDLED;
 }
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+
+	if (!percpu_ipi_descs)
+		__ipi_send_mask(get_ipi_desc(0, ipinr), target);
+	else
+		for_each_cpu(cpu, target)
+			__ipi_send_single(get_ipi_desc(cpu, ipinr), cpu);
 }
 
 static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
@@ -1046,11 +1078,15 @@ static void ipi_setup(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			prepare_percpu_nmi(ipi_irq_base + i);
-			enable_percpu_nmi(ipi_irq_base + i, 0);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				prepare_percpu_nmi(ipi_irq_base + i);
+				enable_percpu_nmi(ipi_irq_base + i, 0);
+			} else {
+				enable_percpu_irq(ipi_irq_base + i, 0);
+			}
 		} else {
-			enable_percpu_irq(ipi_irq_base + i, 0);
+			enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
@@ -1064,44 +1100,79 @@ static void ipi_teardown(int cpu)
 		return;
 
 	for (i = 0; i < nr_ipi; i++) {
-		if (ipi_should_be_nmi(i)) {
-			disable_percpu_nmi(ipi_irq_base + i);
-			teardown_percpu_nmi(ipi_irq_base + i);
+		if (!percpu_ipi_descs) {
+			if (ipi_should_be_nmi(i)) {
+				disable_percpu_nmi(ipi_irq_base + i);
+				teardown_percpu_nmi(ipi_irq_base + i);
+			} else {
+				disable_percpu_irq(ipi_irq_base + i);
+			}
 		} else {
-			disable_percpu_irq(ipi_irq_base + i);
+			disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
 		}
 	}
 }
 #endif
 
-void __init set_smp_ipi_range(int ipi_base, int n)
+static void ipi_setup_ppi(int ipi)
+{
+	int err, irq, cpu;
+
+	irq = ipi_irq_base + ipi;
+
+	if (ipi_should_be_nmi(ipi)) {
+		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
+	} else {
+		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
+		WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
+	}
+
+	for_each_possible_cpu(cpu)
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
+
+	irq_set_status_flags(irq, IRQ_HIDDEN);
+}
+
+static void ipi_setup_lpi(int ipi, int ncpus)
+{
+	for (int cpu = 0; cpu < ncpus; cpu++) {
+		int err, irq;
+
+		irq = ipi_to_irq(ipi, cpu);
+
+		err = irq_force_affinity(irq, cpumask_of(cpu));
+
+		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
+
+		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
+				  NULL);
+
+		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
+
+		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
+
+		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
+	}
+}
+
+void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
 {
 	int i;
 
 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
 
-	for (i = 0; i < nr_ipi; i++) {
-		int err;
-
-		if (ipi_should_be_nmi(i)) {
-			err = request_percpu_nmi(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
-			     i, err);
-		} else {
-			err = request_percpu_irq(ipi_base + i, ipi_handler,
-						 "IPI", &irq_stat);
-			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
-			     i, err);
-		}
-
-		ipi_desc[i] = irq_to_desc(ipi_base + i);
-		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
-	}
-
+	percpu_ipi_descs = !!ncpus;
 	ipi_irq_base = ipi_base;
 
+	for (i = 0; i < nr_ipi; i++) {
+		if (!percpu_ipi_descs)
+			ipi_setup_ppi(i);
+		else
+			ipi_setup_lpi(i, ncpus);
+	}
+
 	/* Setup the boot CPU immediately */
 	ipi_setup(smp_processor_id());
 }

-- 
2.48.0
Re: [PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs
Posted by Jonathan Cameron 8 months, 2 weeks ago
On Tue, 13 May 2025 19:48:11 +0200
Lorenzo Pieralisi <lpieralisi@kernel.org> wrote:

> From: Marc Zyngier <maz@kernel.org>
> 
> The arm64 arch has relied so far on GIC architectural software
> generated interrupt (SGIs) to handle IPIs. Those are per-cpu
> software generated interrupts.
> 
> arm64 architecture code that allocates the IPIs virtual IRQs and
> IRQ descriptors was written accordingly.
> 
> On GICv5 systems, IPIs are implemented using LPIs that are not
> per-cpu interrupts - they are just normal routable IRQs.
> 
> Add arch code to set-up IPIs on systems where they are handled
> using normal routable IRQs.
> 
> For those systems, force the IRQ affinity (and make it immutable)
> to the cpu a given IRQ was assigned to.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> [timothy.hayes@arm.com: fixed ipi/irq conversion, irq flags]
> Signed-off-by: Timothy Hayes <timothy.hayes@arm.com>
> [lpieralisi: changed affinity set-up, log]
> Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
> Cc: Will Deacon <will@kernel.org>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
Hi Lorenzo,

A few trivial comments inline.

> +
> +static int ipi_to_irq(int ipi, int cpu)

Maybe this naming needs a breadcrumb to indicate this applies
only to the lpi case, as it's directly computed in the old ppi code?
A comment might do the job.

> +{
> +	return ipi_irq_base + (cpu * nr_ipi) + ipi;
> +}
> +
> +static int irq_to_ipi(int irq)
> +{
> +	return (irq - ipi_irq_base) % nr_ipi;
> +}


> +static void ipi_setup_lpi(int ipi, int ncpus)
> +{
> +	for (int cpu = 0; cpu < ncpus; cpu++) {
> +		int err, irq;
> +
> +		irq = ipi_to_irq(ipi, cpu);
> +
> +		err = irq_force_affinity(irq, cpumask_of(cpu));
> +
Trivial local consistency thing but maybe no blank line here or...
> +		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
> +
> +		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
> +				  &irq_stat);
> +
here to match the style in ipi_setup_ppi()

> +		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
> +
> +		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
> +
> +		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
> +	}
> +}
Re: [PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs
Posted by Lorenzo Pieralisi 8 months, 2 weeks ago
On Wed, May 28, 2025 at 01:17:44PM +0100, Jonathan Cameron wrote:
> On Tue, 13 May 2025 19:48:11 +0200
> Lorenzo Pieralisi <lpieralisi@kernel.org> wrote:
> 
> > From: Marc Zyngier <maz@kernel.org>
> > 
> > The arm64 arch has relied so far on GIC architectural software
> > generated interrupt (SGIs) to handle IPIs. Those are per-cpu
> > software generated interrupts.
> > 
> > arm64 architecture code that allocates the IPIs virtual IRQs and
> > IRQ descriptors was written accordingly.
> > 
> > On GICv5 systems, IPIs are implemented using LPIs that are not
> > per-cpu interrupts - they are just normal routable IRQs.
> > 
> > Add arch code to set-up IPIs on systems where they are handled
> > using normal routable IRQs.
> > 
> > For those systems, force the IRQ affinity (and make it immutable)
> > to the cpu a given IRQ was assigned to.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > [timothy.hayes@arm.com: fixed ipi/irq conversion, irq flags]
> > Signed-off-by: Timothy Hayes <timothy.hayes@arm.com>
> > [lpieralisi: changed affinity set-up, log]
> > Signed-off-by: Lorenzo Pieralisi <lpieralisi@kernel.org>
> > Cc: Will Deacon <will@kernel.org>
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> Hi Lorenzo,
> 
> A few trivial comments inline.
> 
> > +
> > +static int ipi_to_irq(int ipi, int cpu)
> 
> Maybe this naming needs a breadcrumb to indicate this only
> applies only to lpi case as it's directly computed in the old ppi code?
> A comment might do the job.

Maybe rename it to ipi_to_irq_percpu() (similar to what we did for
set_smp_ipi_range()) and then

static int ipi_to_irq(int ipi)
{
	return ipi_to_irq_percpu(ipi, 0);
}

and use ipi_to_irq() in ppi code ?

Likely overkill, not a big deal anyway.

> > +{
> > +	return ipi_irq_base + (cpu * nr_ipi) + ipi;
> > +}
> > +
> > +static int irq_to_ipi(int irq)
> > +{
> > +	return (irq - ipi_irq_base) % nr_ipi;
> > +}
> 
> 
> > +static void ipi_setup_lpi(int ipi, int ncpus)
> > +{
> > +	for (int cpu = 0; cpu < ncpus; cpu++) {
> > +		int err, irq;
> > +
> > +		irq = ipi_to_irq(ipi, cpu);
> > +
> > +		err = irq_force_affinity(irq, cpumask_of(cpu));
> > +
> Trivial local consistency thing but maybe no blank line here or...
> > +		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
> > +
> > +		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
> > +				  &irq_stat);
> > +
> here to match the style in ipi_setup_ppi()

Done.

Thanks,
Lorenzo

> > +		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
> > +
> > +		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
> > +
> > +		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
> > +	}
> > +}
>
Re: [PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs
Posted by Lorenzo Pieralisi 9 months ago
On Tue, May 13, 2025 at 07:48:11PM +0200, Lorenzo Pieralisi wrote:

[...]

>  /*
>   * Called from the secondary holding pen, this is the secondary CPU entry point.
> diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
> index 3b3f6b56e733039cad7ff5b8995db16a68f3c762..3f3712e47c94c62836fb89cd4bfb3595fbb41557 100644
> --- a/arch/arm64/kernel/smp.c
> +++ b/arch/arm64/kernel/smp.c
> @@ -83,7 +83,26 @@ enum ipi_msg_type {
>  
>  static int ipi_irq_base __ro_after_init;
>  static int nr_ipi __ro_after_init = NR_IPI;
> -static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
> +
> +struct ipi_descs {
> +	struct irq_desc *descs[MAX_IPI];
> +};
> +
> +static DEFINE_PER_CPU(struct ipi_descs, pcpu_ipi_desc);
> +
> +#define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
> +
> +static bool percpu_ipi_descs __ro_after_init;
> +
> +static int ipi_to_irq(int ipi, int cpu)
> +{
> +	return ipi_irq_base + (cpu * nr_ipi) + ipi;
> +}
> +
> +static int irq_to_ipi(int irq)
> +{
> +	return (irq - ipi_irq_base) % nr_ipi;
> +}
>  
>  static bool crash_stop;
>  
> @@ -844,7 +863,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
>  		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
>  			   prec >= 4 ? " " : "");
>  		for_each_online_cpu(cpu)
> -			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
> +			seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
>  		seq_printf(p, "      %s\n", ipi_types[i]);
>  	}
>  
> @@ -919,7 +938,13 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
>  
>  static void arm64_backtrace_ipi(cpumask_t *mask)
>  {
> -	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
> +	unsigned int cpu;
> +
> +	if (!percpu_ipi_descs)
> +		__ipi_send_mask(get_ipi_desc(0, IPI_CPU_BACKTRACE), mask);
> +	else
> +		for_each_cpu(cpu, mask)
> +			__ipi_send_single(get_ipi_desc(cpu, IPI_CPU_BACKTRACE), cpu);
>  }
>  
>  void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
> @@ -944,7 +969,7 @@ void kgdb_roundup_cpus(void)
>  		if (cpu == this_cpu)
>  			continue;
>  
> -		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
> +		__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
>  	}
>  }
>  #endif
> @@ -1013,14 +1038,21 @@ static void do_handle_IPI(int ipinr)
>  
>  static irqreturn_t ipi_handler(int irq, void *data)
>  {
> -	do_handle_IPI(irq - ipi_irq_base);
> +	do_handle_IPI(irq_to_ipi(irq));
>  	return IRQ_HANDLED;
>  }
>  
>  static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
>  {
> +	unsigned int cpu;
> +
>  	trace_ipi_raise(target, ipi_types[ipinr]);
> -	__ipi_send_mask(ipi_desc[ipinr], target);
> +
> +	if (!percpu_ipi_descs)
> +		__ipi_send_mask(get_ipi_desc(0, ipinr), target);
> +	else
> +		for_each_cpu(cpu, target)
> +			__ipi_send_single(get_ipi_desc(cpu, ipinr), cpu);
>  }
>  
>  static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
> @@ -1046,11 +1078,15 @@ static void ipi_setup(int cpu)
>  		return;
>  
>  	for (i = 0; i < nr_ipi; i++) {
> -		if (ipi_should_be_nmi(i)) {
> -			prepare_percpu_nmi(ipi_irq_base + i);
> -			enable_percpu_nmi(ipi_irq_base + i, 0);
> +		if (!percpu_ipi_descs) {
> +			if (ipi_should_be_nmi(i)) {
> +				prepare_percpu_nmi(ipi_irq_base + i);
> +				enable_percpu_nmi(ipi_irq_base + i, 0);
> +			} else {
> +				enable_percpu_irq(ipi_irq_base + i, 0);
> +			}
>  		} else {
> -			enable_percpu_irq(ipi_irq_base + i, 0);
> +			enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
>  		}
>  	}
>  }
> @@ -1064,44 +1100,79 @@ static void ipi_teardown(int cpu)
>  		return;
>  
>  	for (i = 0; i < nr_ipi; i++) {
> -		if (ipi_should_be_nmi(i)) {
> -			disable_percpu_nmi(ipi_irq_base + i);
> -			teardown_percpu_nmi(ipi_irq_base + i);
> +		if (!percpu_ipi_descs) {
> +			if (ipi_should_be_nmi(i)) {
> +				disable_percpu_nmi(ipi_irq_base + i);
> +				teardown_percpu_nmi(ipi_irq_base + i);
> +			} else {
> +				disable_percpu_irq(ipi_irq_base + i);
> +			}
>  		} else {
> -			disable_percpu_irq(ipi_irq_base + i);
> +			disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
>  		}
>  	}
>  }
>  #endif
>  
> -void __init set_smp_ipi_range(int ipi_base, int n)
> +static void ipi_setup_ppi(int ipi)
> +{
> +	int err, irq, cpu;
> +
> +	irq = ipi_irq_base + ipi;
> +
> +	if (ipi_should_be_nmi(irq)) {
> +		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
> +		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
> +	} else {
> +		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
> +		WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
> +	}
> +
> +	for_each_possible_cpu(cpu)
> +		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
> +
> +	irq_set_status_flags(irq, IRQ_HIDDEN);
> +}
> +
> +static void ipi_setup_lpi(int ipi, int ncpus)
> +{
> +	for (int cpu = 0; cpu < ncpus; cpu++) {
> +		int err, irq;
> +
> +		irq = ipi_to_irq(ipi, cpu);
> +
> +		err = irq_force_affinity(irq, cpumask_of(cpu));
> +
> +		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
> +
> +		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
> +				  &irq_stat);

Heads-up, kbuild bot (sparse) barfed (correctly) at this, because the
&irq_stat pointer does not match the request_irq() void *dev_id parameter
signature (it is void __percpu *).

Of course, the &irq_stat parameter is unused so this is harmless.

I would just pass NULL (because AFAICS irq_stat in the action handler is
unused), the question is why are we passing &irq_stat in
request_percpu_irq() if that's unused in ipi_handler() ?

Was it used before and we removed its usage ? Should we clean it up
for completeness ?

Thanks,
Lorenzo

> +
> +		WARN(err, "Could not request IRQ %d, err=%d\n", irq, err);
> +
> +		irq_set_status_flags(irq, (IRQ_HIDDEN | IRQ_NO_BALANCING_MASK));
> +
> +		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
> +	}
> +}
> +
> +void __init set_smp_ipi_range_percpu(int ipi_base, int n, int ncpus)
>  {
>  	int i;
>  
>  	WARN_ON(n < MAX_IPI);
>  	nr_ipi = min(n, MAX_IPI);
>  
> -	for (i = 0; i < nr_ipi; i++) {
> -		int err;
> -
> -		if (ipi_should_be_nmi(i)) {
> -			err = request_percpu_nmi(ipi_base + i, ipi_handler,
> -						 "IPI", &irq_stat);
> -			WARN(err, "Could not request IPI %d as NMI, err=%d\n",
> -			     i, err);
> -		} else {
> -			err = request_percpu_irq(ipi_base + i, ipi_handler,
> -						 "IPI", &irq_stat);
> -			WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
> -			     i, err);
> -		}
> -
> -		ipi_desc[i] = irq_to_desc(ipi_base + i);
> -		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
> -	}
> -
> +	percpu_ipi_descs = !!ncpus;
>  	ipi_irq_base = ipi_base;
>  
> +	for (i = 0; i < nr_ipi; i++) {
> +		if (!percpu_ipi_descs)
> +			ipi_setup_ppi(i);
> +		else
> +			ipi_setup_lpi(i, ncpus);
> +	}
> +
>  	/* Setup the boot CPU immediately */
>  	ipi_setup(smp_processor_id());
>  }
> 
> -- 
> 2.48.0
>
Re: [PATCH v4 18/26] arm64: smp: Support non-SGIs for IPIs
Posted by Lorenzo Pieralisi 9 months ago
On Wed, May 14, 2025 at 12:39:28PM +0200, Lorenzo Pieralisi wrote:
> On Tue, May 13, 2025 at 07:48:11PM +0200, Lorenzo Pieralisi wrote:
> 
> [...]
> 
> >  /*
> >   * Called from the secondary holding pen, this is the secondary CPU entry point.
> > diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
> > index 3b3f6b56e733039cad7ff5b8995db16a68f3c762..3f3712e47c94c62836fb89cd4bfb3595fbb41557 100644
> > --- a/arch/arm64/kernel/smp.c
> > +++ b/arch/arm64/kernel/smp.c
> > @@ -83,7 +83,26 @@ enum ipi_msg_type {
> >  
> >  static int ipi_irq_base __ro_after_init;
> >  static int nr_ipi __ro_after_init = NR_IPI;
> > -static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
> > +
> > +struct ipi_descs {
> > +	struct irq_desc *descs[MAX_IPI];
> > +};
> > +
> > +static DEFINE_PER_CPU(struct ipi_descs, pcpu_ipi_desc);
> > +
> > +#define get_ipi_desc(__cpu, __ipi) (per_cpu_ptr(&pcpu_ipi_desc, __cpu)->descs[__ipi])
> > +
> > +static bool percpu_ipi_descs __ro_after_init;
> > +
> > +static int ipi_to_irq(int ipi, int cpu)
> > +{
> > +	return ipi_irq_base + (cpu * nr_ipi) + ipi;
> > +}
> > +
> > +static int irq_to_ipi(int irq)
> > +{
> > +	return (irq - ipi_irq_base) % nr_ipi;
> > +}
> >  
> >  static bool crash_stop;
> >  
> > @@ -844,7 +863,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
> >  		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
> >  			   prec >= 4 ? " " : "");
> >  		for_each_online_cpu(cpu)
> > -			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
> > +			seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu));
> >  		seq_printf(p, "      %s\n", ipi_types[i]);
> >  	}
> >  
> > @@ -919,7 +938,13 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
> >  
> >  static void arm64_backtrace_ipi(cpumask_t *mask)
> >  {
> > -	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
> > +	unsigned int cpu;
> > +
> > +	if (!percpu_ipi_descs)
> > +		__ipi_send_mask(get_ipi_desc(0, IPI_CPU_BACKTRACE), mask);
> > +	else
> > +		for_each_cpu(cpu, mask)
> > +			__ipi_send_single(get_ipi_desc(cpu, IPI_CPU_BACKTRACE), cpu);
> >  }
> >  
> >  void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
> > @@ -944,7 +969,7 @@ void kgdb_roundup_cpus(void)
> >  		if (cpu == this_cpu)
> >  			continue;
> >  
> > -		__ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
> > +		__ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu);
> >  	}
> >  }
> >  #endif
> > @@ -1013,14 +1038,21 @@ static void do_handle_IPI(int ipinr)
> >  
> >  static irqreturn_t ipi_handler(int irq, void *data)
> >  {
> > -	do_handle_IPI(irq - ipi_irq_base);
> > +	do_handle_IPI(irq_to_ipi(irq));
> >  	return IRQ_HANDLED;
> >  }
> >  
> >  static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
> >  {
> > +	unsigned int cpu;
> > +
> >  	trace_ipi_raise(target, ipi_types[ipinr]);
> > -	__ipi_send_mask(ipi_desc[ipinr], target);
> > +
> > +	if (!percpu_ipi_descs)
> > +		__ipi_send_mask(get_ipi_desc(0, ipinr), target);
> > +	else
> > +		for_each_cpu(cpu, target)
> > +			__ipi_send_single(get_ipi_desc(cpu, ipinr), cpu);
> >  }
> >  
> >  static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
> > @@ -1046,11 +1078,15 @@ static void ipi_setup(int cpu)
> >  		return;
> >  
> >  	for (i = 0; i < nr_ipi; i++) {
> > -		if (ipi_should_be_nmi(i)) {
> > -			prepare_percpu_nmi(ipi_irq_base + i);
> > -			enable_percpu_nmi(ipi_irq_base + i, 0);
> > +		if (!percpu_ipi_descs) {
> > +			if (ipi_should_be_nmi(i)) {
> > +				prepare_percpu_nmi(ipi_irq_base + i);
> > +				enable_percpu_nmi(ipi_irq_base + i, 0);
> > +			} else {
> > +				enable_percpu_irq(ipi_irq_base + i, 0);
> > +			}
> >  		} else {
> > -			enable_percpu_irq(ipi_irq_base + i, 0);
> > +			enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
> >  		}
> >  	}
> >  }
> > @@ -1064,44 +1100,79 @@ static void ipi_teardown(int cpu)
> >  		return;
> >  
> >  	for (i = 0; i < nr_ipi; i++) {
> > -		if (ipi_should_be_nmi(i)) {
> > -			disable_percpu_nmi(ipi_irq_base + i);
> > -			teardown_percpu_nmi(ipi_irq_base + i);
> > +		if (!percpu_ipi_descs) {
> > +			if (ipi_should_be_nmi(i)) {
> > +				disable_percpu_nmi(ipi_irq_base + i);
> > +				teardown_percpu_nmi(ipi_irq_base + i);
> > +			} else {
> > +				disable_percpu_irq(ipi_irq_base + i);
> > +			}
> >  		} else {
> > -			disable_percpu_irq(ipi_irq_base + i);
> > +			disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i)));
> >  		}
> >  	}
> >  }
> >  #endif
> >  
> > -void __init set_smp_ipi_range(int ipi_base, int n)
> > +static void ipi_setup_ppi(int ipi)
> > +{
> > +	int err, irq, cpu;
> > +
> > +	irq = ipi_irq_base + ipi;
> > +
> > +	if (ipi_should_be_nmi(irq)) {
> > +		err = request_percpu_nmi(irq, ipi_handler, "IPI", &irq_stat);
> > +		WARN(err, "Could not request IRQ %d as NMI, err=%d\n", irq, err);
> > +	} else {
> > +		err = request_percpu_irq(irq, ipi_handler, "IPI", &irq_stat);
> > +		WARN(err, "Could not request IRQ %d as IRQ, err=%d\n", irq, err);
> > +	}
> > +
> > +	for_each_possible_cpu(cpu)
> > +		get_ipi_desc(cpu, ipi) = irq_to_desc(irq);
> > +
> > +	irq_set_status_flags(irq, IRQ_HIDDEN);
> > +}
> > +
> > +static void ipi_setup_lpi(int ipi, int ncpus)
> > +{
> > +	for (int cpu = 0; cpu < ncpus; cpu++) {
> > +		int err, irq;
> > +
> > +		irq = ipi_to_irq(ipi, cpu);
> > +
> > +		err = irq_force_affinity(irq, cpumask_of(cpu));
> > +
> > +		WARN(err, "Could not force affinity IRQ %d, err=%d\n", irq, err);
> > +
> > +		err = request_irq(irq, ipi_handler, IRQF_NO_AUTOEN, "IPI",
> > +				  &irq_stat);
> 
> Heads-up, kbuild bot (sparse) barfed (correctly) at this, because the
> &irq_stat pointer does not match the request_irq() void *dev_id parameter
> signature (it is void __percpu *).
> 
> Of course, the &irq_stat parameter is unused so this is harmless.
> 
> I would just pass NULL (because AFAICS irq_stat in the action handler is
> unused), the question is why are we passing &irq_stat in
> request_percpu_irq() if that's unused in ipi_handler() ?

Right, we have to have it there even if the ipi_handler() does not use
it, that's as much as I can gather by checking the request_percpu_irq()
interface and the percpu flow handler (handle_percpu_devid_irq()) used
for SGIs IRQs on GICv3.

For non-SGI IPIs, I will just pass NULL to request_irq() as void *dev_id
because AFAICS it is not used in arm64 ipi_handler() (I use the
handle_percpu_irq() flow handler), I applied the fix-up locally FYI.

Lorenzo