[PATCH] arch_topology: move parse_acpi_topology() to common code
Posted by Yunhui Cui 2 weeks, 1 day ago
Currently, RISC-V lacks arch-specific registers for CPU topology
properties and must get them from ACPI. Thus, parse_acpi_topology()
is moved from arm64/ to drivers/ for RISC-V reuse.

Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
---
 arch/arm64/kernel/topology.c  | 87 +----------------------------------
 drivers/base/arch_topology.c  | 85 +++++++++++++++++++++++++++++++++-
 include/linux/arch_topology.h |  6 +++
 3 files changed, 91 insertions(+), 87 deletions(-)

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 5d07ee85bdae4..55650db53b526 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -26,7 +26,7 @@
 #include <asm/topology.h>
 
 #ifdef CONFIG_ACPI
-static bool __init acpi_cpu_is_threaded(int cpu)
+bool __init acpi_cpu_is_threaded(int cpu)
 {
 	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
 
@@ -39,91 +39,6 @@ static bool __init acpi_cpu_is_threaded(int cpu)
 
 	return !!is_threaded;
 }
-
-struct cpu_smt_info {
-	unsigned int thread_num;
-	int core_id;
-};
-
-/*
- * Propagate the topology information of the processor_topology_node tree to the
- * cpu_topology array.
- */
-int __init parse_acpi_topology(void)
-{
-	unsigned int max_smt_thread_num = 1;
-	struct cpu_smt_info *entry;
-	struct xarray hetero_cpu;
-	unsigned long hetero_id;
-	int cpu, topology_id;
-
-	if (acpi_disabled)
-		return 0;
-
-	xa_init(&hetero_cpu);
-
-	for_each_possible_cpu(cpu) {
-		topology_id = find_acpi_cpu_topology(cpu, 0);
-		if (topology_id < 0)
-			return topology_id;
-
-		if (acpi_cpu_is_threaded(cpu)) {
-			cpu_topology[cpu].thread_id = topology_id;
-			topology_id = find_acpi_cpu_topology(cpu, 1);
-			cpu_topology[cpu].core_id   = topology_id;
-
-			/*
-			 * In the PPTT, CPUs below a node with the 'identical
-			 * implementation' flag have the same number of threads.
-			 * Count the number of threads for only one CPU (i.e.
-			 * one core_id) among those with the same hetero_id.
-			 * See the comment of find_acpi_cpu_topology_hetero_id()
-			 * for more details.
-			 *
-			 * One entry is created for each node having:
-			 * - the 'identical implementation' flag
-			 * - its parent not having the flag
-			 */
-			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
-			entry = xa_load(&hetero_cpu, hetero_id);
-			if (!entry) {
-				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-				WARN_ON_ONCE(!entry);
-
-				if (entry) {
-					entry->core_id = topology_id;
-					entry->thread_num = 1;
-					xa_store(&hetero_cpu, hetero_id,
-						 entry, GFP_KERNEL);
-				}
-			} else if (entry->core_id == topology_id) {
-				entry->thread_num++;
-			}
-		} else {
-			cpu_topology[cpu].thread_id  = -1;
-			cpu_topology[cpu].core_id    = topology_id;
-		}
-		topology_id = find_acpi_cpu_topology_cluster(cpu);
-		cpu_topology[cpu].cluster_id = topology_id;
-		topology_id = find_acpi_cpu_topology_package(cpu);
-		cpu_topology[cpu].package_id = topology_id;
-	}
-
-	/*
-	 * This is a short loop since the number of XArray elements is the
-	 * number of heterogeneous CPU clusters. On a homogeneous system
-	 * there's only one entry in the XArray.
-	 */
-	xa_for_each(&hetero_cpu, hetero_id, entry) {
-		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
-		xa_erase(&hetero_cpu, hetero_id);
-		kfree(entry);
-	}
-
-	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
-	xa_destroy(&hetero_cpu);
-	return 0;
-}
 #endif
 
 #ifdef CONFIG_ARM64_AMU_EXTN
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb459..65ec1f3d2bd28 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -823,12 +823,95 @@ void remove_cpu_topology(unsigned int cpu)
 	clear_cpu_topology(cpu);
 }
 
+__weak bool __init acpi_cpu_is_threaded(int cpu)
+{
+	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+	return !!is_threaded;
+
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
 __weak int __init parse_acpi_topology(void)
 {
+	unsigned int max_smt_thread_num = 1;
+	struct cpu_smt_info *entry;
+	struct xarray hetero_cpu;
+	unsigned long hetero_id;
+	int cpu, topology_id;
+
+	if (acpi_disabled)
+		return 0;
+
+	xa_init(&hetero_cpu);
+
+	for_each_possible_cpu(cpu) {
+		topology_id = find_acpi_cpu_topology(cpu, 0);
+		if (topology_id < 0)
+			return topology_id;
+
+		if (acpi_cpu_is_threaded(cpu)) {
+			cpu_topology[cpu].thread_id = topology_id;
+			topology_id = find_acpi_cpu_topology(cpu, 1);
+			cpu_topology[cpu].core_id   = topology_id;
+
+			/*
+			 * In the PPTT, CPUs below a node with the 'identical
+			 * implementation' flag have the same number of threads.
+			 * Count the number of threads for only one CPU (i.e.
+			 * one core_id) among those with the same hetero_id.
+			 * See the comment of find_acpi_cpu_topology_hetero_id()
+			 * for more details.
+			 *
+			 * One entry is created for each node having:
+			 * - the 'identical implementation' flag
+			 * - its parent not having the flag
+			 */
+			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+			entry = xa_load(&hetero_cpu, hetero_id);
+			if (!entry) {
+				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+				WARN_ON_ONCE(!entry);
+
+				if (entry) {
+					entry->core_id = topology_id;
+					entry->thread_num = 1;
+					xa_store(&hetero_cpu, hetero_id,
+						 entry, GFP_KERNEL);
+				}
+			} else if (entry->core_id == topology_id) {
+				entry->thread_num++;
+			}
+		} else {
+			cpu_topology[cpu].thread_id  = -1;
+			cpu_topology[cpu].core_id    = topology_id;
+		}
+		topology_id = find_acpi_cpu_topology_cluster(cpu);
+		cpu_topology[cpu].cluster_id = topology_id;
+		topology_id = find_acpi_cpu_topology_package(cpu);
+		cpu_topology[cpu].package_id = topology_id;
+	}
+
+	/*
+	 * This is a short loop since the number of XArray elements is the
+	 * number of heterogeneous CPU clusters. On a homogeneous system
+	 * there's only one entry in the XArray.
+	 */
+	xa_for_each(&hetero_cpu, hetero_id, entry) {
+		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+		xa_erase(&hetero_cpu, hetero_id);
+		kfree(entry);
+	}
+
+	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+	xa_destroy(&hetero_cpu);
 	return 0;
 }
 
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
 void __init init_cpu_topology(void)
 {
 	int cpu, ret;
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index d72d6e5aa2002..50d33b5a78ccd 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -70,6 +70,11 @@ struct cpu_topology {
 	cpumask_t llc_sibling;
 };
 
+struct cpu_smt_info {
+	unsigned int thread_num;
+	int core_id;
+};
+
 #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
 extern struct cpu_topology cpu_topology[NR_CPUS];
 
@@ -88,6 +93,7 @@ void update_siblings_masks(unsigned int cpu);
 void remove_cpu_topology(unsigned int cpuid);
 void reset_cpu_topology(void);
 int parse_acpi_topology(void);
+bool acpi_cpu_is_threaded(int cpu);
 void freq_inv_set_max_ratio(int cpu, u64 max_rate);
 #endif
 
-- 
2.39.5
Re: [PATCH] arch_topology: move parse_acpi_topology() to common code
Posted by Yicong Yang 2 weeks, 1 day ago
On 2025/9/17 10:30, Yunhui Cui wrote:
> Currently, RISC-V lacks arch-specific registers for CPU topology
> properties and must get them from ACPI. Thus, parse_acpi_topology()
> is moved from arm64/ to drivers/ for RISC-V reuse.
> 
> Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
> ---
>  arch/arm64/kernel/topology.c  | 87 +----------------------------------
>  drivers/base/arch_topology.c  | 85 +++++++++++++++++++++++++++++++++-
>  include/linux/arch_topology.h |  6 +++
>  3 files changed, 91 insertions(+), 87 deletions(-)
> 
> diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
> index 5d07ee85bdae4..55650db53b526 100644
> --- a/arch/arm64/kernel/topology.c
> +++ b/arch/arm64/kernel/topology.c
> @@ -26,7 +26,7 @@
>  #include <asm/topology.h>
>  
>  #ifdef CONFIG_ACPI
> -static bool __init acpi_cpu_is_threaded(int cpu)
> +bool __init acpi_cpu_is_threaded(int cpu)
>  {
>  	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
>  
> @@ -39,91 +39,6 @@ static bool __init acpi_cpu_is_threaded(int cpu)
>  
>  	return !!is_threaded;
>  }
> -
> -struct cpu_smt_info {
> -	unsigned int thread_num;
> -	int core_id;
> -};
> -
> -/*
> - * Propagate the topology information of the processor_topology_node tree to the
> - * cpu_topology array.
> - */
> -int __init parse_acpi_topology(void)
> -{
> -	unsigned int max_smt_thread_num = 1;
> -	struct cpu_smt_info *entry;
> -	struct xarray hetero_cpu;
> -	unsigned long hetero_id;
> -	int cpu, topology_id;
> -
> -	if (acpi_disabled)
> -		return 0;
> -
> -	xa_init(&hetero_cpu);
> -
> -	for_each_possible_cpu(cpu) {
> -		topology_id = find_acpi_cpu_topology(cpu, 0);
> -		if (topology_id < 0)
> -			return topology_id;
> -
> -		if (acpi_cpu_is_threaded(cpu)) {
> -			cpu_topology[cpu].thread_id = topology_id;
> -			topology_id = find_acpi_cpu_topology(cpu, 1);
> -			cpu_topology[cpu].core_id   = topology_id;
> -
> -			/*
> -			 * In the PPTT, CPUs below a node with the 'identical
> -			 * implementation' flag have the same number of threads.
> -			 * Count the number of threads for only one CPU (i.e.
> -			 * one core_id) among those with the same hetero_id.
> -			 * See the comment of find_acpi_cpu_topology_hetero_id()
> -			 * for more details.
> -			 *
> -			 * One entry is created for each node having:
> -			 * - the 'identical implementation' flag
> -			 * - its parent not having the flag
> -			 */
> -			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
> -			entry = xa_load(&hetero_cpu, hetero_id);
> -			if (!entry) {
> -				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
> -				WARN_ON_ONCE(!entry);
> -
> -				if (entry) {
> -					entry->core_id = topology_id;
> -					entry->thread_num = 1;
> -					xa_store(&hetero_cpu, hetero_id,
> -						 entry, GFP_KERNEL);
> -				}
> -			} else if (entry->core_id == topology_id) {
> -				entry->thread_num++;
> -			}
> -		} else {
> -			cpu_topology[cpu].thread_id  = -1;
> -			cpu_topology[cpu].core_id    = topology_id;
> -		}
> -		topology_id = find_acpi_cpu_topology_cluster(cpu);
> -		cpu_topology[cpu].cluster_id = topology_id;
> -		topology_id = find_acpi_cpu_topology_package(cpu);
> -		cpu_topology[cpu].package_id = topology_id;
> -	}
> -
> -	/*
> -	 * This is a short loop since the number of XArray elements is the
> -	 * number of heterogeneous CPU clusters. On a homogeneous system
> -	 * there's only one entry in the XArray.
> -	 */
> -	xa_for_each(&hetero_cpu, hetero_id, entry) {
> -		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
> -		xa_erase(&hetero_cpu, hetero_id);
> -		kfree(entry);
> -	}
> -
> -	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
> -	xa_destroy(&hetero_cpu);
> -	return 0;
> -}
>  #endif
>  
>  #ifdef CONFIG_ARM64_AMU_EXTN
> diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
> index 1037169abb459..65ec1f3d2bd28 100644
> --- a/drivers/base/arch_topology.c
> +++ b/drivers/base/arch_topology.c
> @@ -823,12 +823,95 @@ void remove_cpu_topology(unsigned int cpu)
>  	clear_cpu_topology(cpu);
>  }
>  
> +__weak bool __init acpi_cpu_is_threaded(int cpu)
> +{
> +	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
> +
> +	return !!is_threaded;
> +

acpi_pptt_cpu_is_thread() can return a negative error code, which
shouldn't be treated as the CPU being threaded.
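
Something like the following untested sketch would handle that in the
common weak helper (there is no MPIDR-style fallback to consult in the
generic code, so the error is simply treated as "not threaded"):

__weak bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * acpi_pptt_cpu_is_thread() returns a negative value when the
	 * PPTT is missing or carries no thread information; don't let
	 * that look like a threaded CPU.
	 */
	if (is_threaded < 0)
		is_threaded = 0;

	return !!is_threaded;
}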

> +}
> +
> +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
> +/*
> + * Propagate the topology information of the processor_topology_node tree to the
> + * cpu_topology array.
> + */
>  __weak int __init parse_acpi_topology(void)
>  {
> +	unsigned int max_smt_thread_num = 1;
> +	struct cpu_smt_info *entry;
> +	struct xarray hetero_cpu;
> +	unsigned long hetero_id;
> +	int cpu, topology_id;
> +
> +	if (acpi_disabled)
> +		return 0;
> +
> +	xa_init(&hetero_cpu);
> +
> +	for_each_possible_cpu(cpu) {
> +		topology_id = find_acpi_cpu_topology(cpu, 0);
> +		if (topology_id < 0)
> +			return topology_id;
> +
> +		if (acpi_cpu_is_threaded(cpu)) {
> +			cpu_topology[cpu].thread_id = topology_id;
> +			topology_id = find_acpi_cpu_topology(cpu, 1);
> +			cpu_topology[cpu].core_id   = topology_id;
> +
> +			/*
> +			 * In the PPTT, CPUs below a node with the 'identical
> +			 * implementation' flag have the same number of threads.
> +			 * Count the number of threads for only one CPU (i.e.
> +			 * one core_id) among those with the same hetero_id.
> +			 * See the comment of find_acpi_cpu_topology_hetero_id()
> +			 * for more details.
> +			 *
> +			 * One entry is created for each node having:
> +			 * - the 'identical implementation' flag
> +			 * - its parent not having the flag
> +			 */
> +			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
> +			entry = xa_load(&hetero_cpu, hetero_id);
> +			if (!entry) {
> +				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
> +				WARN_ON_ONCE(!entry);
> +
> +				if (entry) {
> +					entry->core_id = topology_id;
> +					entry->thread_num = 1;
> +					xa_store(&hetero_cpu, hetero_id,
> +						 entry, GFP_KERNEL);
> +				}
> +			} else if (entry->core_id == topology_id) {
> +				entry->thread_num++;
> +			}
> +		} else {
> +			cpu_topology[cpu].thread_id  = -1;
> +			cpu_topology[cpu].core_id    = topology_id;
> +		}
> +		topology_id = find_acpi_cpu_topology_cluster(cpu);
> +		cpu_topology[cpu].cluster_id = topology_id;
> +		topology_id = find_acpi_cpu_topology_package(cpu);
> +		cpu_topology[cpu].package_id = topology_id;
> +	}
> +
> +	/*
> +	 * This is a short loop since the number of XArray elements is the
> +	 * number of heterogeneous CPU clusters. On a homogeneous system
> +	 * there's only one entry in the XArray.
> +	 */
> +	xa_for_each(&hetero_cpu, hetero_id, entry) {
> +		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
> +		xa_erase(&hetero_cpu, hetero_id);
> +		kfree(entry);
> +	}
> +
> +	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
> +	xa_destroy(&hetero_cpu);
>  	return 0;
>  }
>  
> -#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
>  void __init init_cpu_topology(void)
>  {
>  	int cpu, ret;
> diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
> index d72d6e5aa2002..50d33b5a78ccd 100644
> --- a/include/linux/arch_topology.h
> +++ b/include/linux/arch_topology.h
> @@ -70,6 +70,11 @@ struct cpu_topology {
>  	cpumask_t llc_sibling;
>  };
>  
> +struct cpu_smt_info {
> +	unsigned int thread_num;
> +	int core_id;
> +};
> +

this is only used in parse_acpi_topology(), so there seems to be no reason to
move it into the header.
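
i.e. something like keeping the definition private to
drivers/base/arch_topology.c, next to its only user (sketch):

/* local to drivers/base/arch_topology.c, no header change needed */
struct cpu_smt_info {
	unsigned int thread_num;	/* threads counted for this core */
	int core_id;
};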

otherwise looks good to me. Most of the ACPI topology building code is not
arm64-specific, and it makes sense to make it common.

thanks.
Re: [External] Re: [PATCH] arch_topology: move parse_acpi_topology() to common code
Posted by yunhui cui 2 weeks ago
Hi Yicong,

On Wed, Sep 17, 2025 at 2:43 PM Yicong Yang <yangyicong@huawei.com> wrote:
>
> On 2025/9/17 10:30, Yunhui Cui wrote:
> > Currently, RISC-V lacks arch-specific registers for CPU topology
> > properties and must get them from ACPI. Thus, parse_acpi_topology()
> > is moved from arm64/ to drivers/ for RISC-V reuse.
> >
> > Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
> > ---
> >  arch/arm64/kernel/topology.c  | 87 +----------------------------------
> >  drivers/base/arch_topology.c  | 85 +++++++++++++++++++++++++++++++++-
> >  include/linux/arch_topology.h |  6 +++
> >  3 files changed, 91 insertions(+), 87 deletions(-)
> >
> > diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
> > index 5d07ee85bdae4..55650db53b526 100644
> > --- a/arch/arm64/kernel/topology.c
> > +++ b/arch/arm64/kernel/topology.c
> > @@ -26,7 +26,7 @@
> >  #include <asm/topology.h>
> >
> >  #ifdef CONFIG_ACPI
> > -static bool __init acpi_cpu_is_threaded(int cpu)
> > +bool __init acpi_cpu_is_threaded(int cpu)
> >  {
> >       int is_threaded = acpi_pptt_cpu_is_thread(cpu);
> >
> > @@ -39,91 +39,6 @@ static bool __init acpi_cpu_is_threaded(int cpu)
> >
> >       return !!is_threaded;
> >  }
> > -
> > -struct cpu_smt_info {
> > -     unsigned int thread_num;
> > -     int core_id;
> > -};
> > -
> > -/*
> > - * Propagate the topology information of the processor_topology_node tree to the
> > - * cpu_topology array.
> > - */
> > -int __init parse_acpi_topology(void)
> > -{
> > -     unsigned int max_smt_thread_num = 1;
> > -     struct cpu_smt_info *entry;
> > -     struct xarray hetero_cpu;
> > -     unsigned long hetero_id;
> > -     int cpu, topology_id;
> > -
> > -     if (acpi_disabled)
> > -             return 0;
> > -
> > -     xa_init(&hetero_cpu);
> > -
> > -     for_each_possible_cpu(cpu) {
> > -             topology_id = find_acpi_cpu_topology(cpu, 0);
> > -             if (topology_id < 0)
> > -                     return topology_id;
> > -
> > -             if (acpi_cpu_is_threaded(cpu)) {
> > -                     cpu_topology[cpu].thread_id = topology_id;
> > -                     topology_id = find_acpi_cpu_topology(cpu, 1);
> > -                     cpu_topology[cpu].core_id   = topology_id;
> > -
> > -                     /*
> > -                      * In the PPTT, CPUs below a node with the 'identical
> > -                      * implementation' flag have the same number of threads.
> > -                      * Count the number of threads for only one CPU (i.e.
> > -                      * one core_id) among those with the same hetero_id.
> > -                      * See the comment of find_acpi_cpu_topology_hetero_id()
> > -                      * for more details.
> > -                      *
> > -                      * One entry is created for each node having:
> > -                      * - the 'identical implementation' flag
> > -                      * - its parent not having the flag
> > -                      */
> > -                     hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
> > -                     entry = xa_load(&hetero_cpu, hetero_id);
> > -                     if (!entry) {
> > -                             entry = kzalloc(sizeof(*entry), GFP_KERNEL);
> > -                             WARN_ON_ONCE(!entry);
> > -
> > -                             if (entry) {
> > -                                     entry->core_id = topology_id;
> > -                                     entry->thread_num = 1;
> > -                                     xa_store(&hetero_cpu, hetero_id,
> > -                                              entry, GFP_KERNEL);
> > -                             }
> > -                     } else if (entry->core_id == topology_id) {
> > -                             entry->thread_num++;
> > -                     }
> > -             } else {
> > -                     cpu_topology[cpu].thread_id  = -1;
> > -                     cpu_topology[cpu].core_id    = topology_id;
> > -             }
> > -             topology_id = find_acpi_cpu_topology_cluster(cpu);
> > -             cpu_topology[cpu].cluster_id = topology_id;
> > -             topology_id = find_acpi_cpu_topology_package(cpu);
> > -             cpu_topology[cpu].package_id = topology_id;
> > -     }
> > -
> > -     /*
> > -      * This is a short loop since the number of XArray elements is the
> > -      * number of heterogeneous CPU clusters. On a homogeneous system
> > -      * there's only one entry in the XArray.
> > -      */
> > -     xa_for_each(&hetero_cpu, hetero_id, entry) {
> > -             max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
> > -             xa_erase(&hetero_cpu, hetero_id);
> > -             kfree(entry);
> > -     }
> > -
> > -     cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
> > -     xa_destroy(&hetero_cpu);
> > -     return 0;
> > -}
> >  #endif
> >
> >  #ifdef CONFIG_ARM64_AMU_EXTN
> > diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
> > index 1037169abb459..65ec1f3d2bd28 100644
> > --- a/drivers/base/arch_topology.c
> > +++ b/drivers/base/arch_topology.c
> > @@ -823,12 +823,95 @@ void remove_cpu_topology(unsigned int cpu)
> >       clear_cpu_topology(cpu);
> >  }
> >
> > +__weak bool __init acpi_cpu_is_threaded(int cpu)
> > +{
> > +     int is_threaded = acpi_pptt_cpu_is_thread(cpu);
> > +
> > +     return !!is_threaded;
> > +
>
> acpi_pptt_cpu_is_thread() can return a negative error code, which
> shouldn't be treated as the CPU being threaded.
>
> > +}
> > +
> > +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
> > +/*
> > + * Propagate the topology information of the processor_topology_node tree to the
> > + * cpu_topology array.
> > + */
> >  __weak int __init parse_acpi_topology(void)
> >  {
> > +     unsigned int max_smt_thread_num = 1;
> > +     struct cpu_smt_info *entry;
> > +     struct xarray hetero_cpu;
> > +     unsigned long hetero_id;
> > +     int cpu, topology_id;
> > +
> > +     if (acpi_disabled)
> > +             return 0;
> > +
> > +     xa_init(&hetero_cpu);
> > +
> > +     for_each_possible_cpu(cpu) {
> > +             topology_id = find_acpi_cpu_topology(cpu, 0);
> > +             if (topology_id < 0)
> > +                     return topology_id;
> > +
> > +             if (acpi_cpu_is_threaded(cpu)) {
> > +                     cpu_topology[cpu].thread_id = topology_id;
> > +                     topology_id = find_acpi_cpu_topology(cpu, 1);
> > +                     cpu_topology[cpu].core_id   = topology_id;
> > +
> > +                     /*
> > +                      * In the PPTT, CPUs below a node with the 'identical
> > +                      * implementation' flag have the same number of threads.
> > +                      * Count the number of threads for only one CPU (i.e.
> > +                      * one core_id) among those with the same hetero_id.
> > +                      * See the comment of find_acpi_cpu_topology_hetero_id()
> > +                      * for more details.
> > +                      *
> > +                      * One entry is created for each node having:
> > +                      * - the 'identical implementation' flag
> > +                      * - its parent not having the flag
> > +                      */
> > +                     hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
> > +                     entry = xa_load(&hetero_cpu, hetero_id);
> > +                     if (!entry) {
> > +                             entry = kzalloc(sizeof(*entry), GFP_KERNEL);
> > +                             WARN_ON_ONCE(!entry);
> > +
> > +                             if (entry) {
> > +                                     entry->core_id = topology_id;
> > +                                     entry->thread_num = 1;
> > +                                     xa_store(&hetero_cpu, hetero_id,
> > +                                              entry, GFP_KERNEL);
> > +                             }
> > +                     } else if (entry->core_id == topology_id) {
> > +                             entry->thread_num++;
> > +                     }
> > +             } else {
> > +                     cpu_topology[cpu].thread_id  = -1;
> > +                     cpu_topology[cpu].core_id    = topology_id;
> > +             }
> > +             topology_id = find_acpi_cpu_topology_cluster(cpu);
> > +             cpu_topology[cpu].cluster_id = topology_id;
> > +             topology_id = find_acpi_cpu_topology_package(cpu);
> > +             cpu_topology[cpu].package_id = topology_id;
> > +     }
> > +
> > +     /*
> > +      * This is a short loop since the number of XArray elements is the
> > +      * number of heterogeneous CPU clusters. On a homogeneous system
> > +      * there's only one entry in the XArray.
> > +      */
> > +     xa_for_each(&hetero_cpu, hetero_id, entry) {
> > +             max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
> > +             xa_erase(&hetero_cpu, hetero_id);
> > +             kfree(entry);
> > +     }
> > +
> > +     cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
> > +     xa_destroy(&hetero_cpu);
> >       return 0;
> >  }
> >
> > -#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
> >  void __init init_cpu_topology(void)
> >  {
> >       int cpu, ret;
> > diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
> > index d72d6e5aa2002..50d33b5a78ccd 100644
> > --- a/include/linux/arch_topology.h
> > +++ b/include/linux/arch_topology.h
> > @@ -70,6 +70,11 @@ struct cpu_topology {
> >       cpumask_t llc_sibling;
> >  };
> >
> > +struct cpu_smt_info {
> > +     unsigned int thread_num;
> > +     int core_id;
> > +};
> > +
>
> this is only used in parse_acpi_topology(), so there seems to be no reason to
> move it into the header.

Okay, I'll update it in v2.

>
> otherwise looks good to me. Most of the ACPI topology building code is not
> arm64-specific, and it makes sense to make it common.
>
> thanks.
>

Thanks,
Yunhui