Currently, RISC-V lacks arch-specific registers for CPU topology
properties and must get them from ACPI. Thus, parse_acpi_topology()
is moved from arm64/ to drivers/ for RISC-V reuse.
Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
---
arch/arm64/kernel/topology.c | 87 +---------------------------------
drivers/base/arch_topology.c | 89 ++++++++++++++++++++++++++++++++++-
include/linux/arch_topology.h | 1 +
3 files changed, 90 insertions(+), 87 deletions(-)
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 5d07ee85bdae4..55650db53b526 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -26,7 +26,7 @@
#include <asm/topology.h>
#ifdef CONFIG_ACPI
-static bool __init acpi_cpu_is_threaded(int cpu)
+bool __init acpi_cpu_is_threaded(int cpu)
{
int is_threaded = acpi_pptt_cpu_is_thread(cpu);
@@ -39,91 +39,6 @@ static bool __init acpi_cpu_is_threaded(int cpu)
return !!is_threaded;
}
-
-struct cpu_smt_info {
- unsigned int thread_num;
- int core_id;
-};
-
-/*
- * Propagate the topology information of the processor_topology_node tree to the
- * cpu_topology array.
- */
-int __init parse_acpi_topology(void)
-{
- unsigned int max_smt_thread_num = 1;
- struct cpu_smt_info *entry;
- struct xarray hetero_cpu;
- unsigned long hetero_id;
- int cpu, topology_id;
-
- if (acpi_disabled)
- return 0;
-
- xa_init(&hetero_cpu);
-
- for_each_possible_cpu(cpu) {
- topology_id = find_acpi_cpu_topology(cpu, 0);
- if (topology_id < 0)
- return topology_id;
-
- if (acpi_cpu_is_threaded(cpu)) {
- cpu_topology[cpu].thread_id = topology_id;
- topology_id = find_acpi_cpu_topology(cpu, 1);
- cpu_topology[cpu].core_id = topology_id;
-
- /*
- * In the PPTT, CPUs below a node with the 'identical
- * implementation' flag have the same number of threads.
- * Count the number of threads for only one CPU (i.e.
- * one core_id) among those with the same hetero_id.
- * See the comment of find_acpi_cpu_topology_hetero_id()
- * for more details.
- *
- * One entry is created for each node having:
- * - the 'identical implementation' flag
- * - its parent not having the flag
- */
- hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
- entry = xa_load(&hetero_cpu, hetero_id);
- if (!entry) {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- WARN_ON_ONCE(!entry);
-
- if (entry) {
- entry->core_id = topology_id;
- entry->thread_num = 1;
- xa_store(&hetero_cpu, hetero_id,
- entry, GFP_KERNEL);
- }
- } else if (entry->core_id == topology_id) {
- entry->thread_num++;
- }
- } else {
- cpu_topology[cpu].thread_id = -1;
- cpu_topology[cpu].core_id = topology_id;
- }
- topology_id = find_acpi_cpu_topology_cluster(cpu);
- cpu_topology[cpu].cluster_id = topology_id;
- topology_id = find_acpi_cpu_topology_package(cpu);
- cpu_topology[cpu].package_id = topology_id;
- }
-
- /*
- * This is a short loop since the number of XArray elements is the
- * number of heterogeneous CPU clusters. On a homogeneous system
- * there's only one entry in the XArray.
- */
- xa_for_each(&hetero_cpu, hetero_id, entry) {
- max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
- xa_erase(&hetero_cpu, hetero_id);
- kfree(entry);
- }
-
- cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
- xa_destroy(&hetero_cpu);
- return 0;
-}
#endif
#ifdef CONFIG_ARM64_AMU_EXTN
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb459..c22746b45d57a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -823,12 +823,99 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+__weak bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ return !!is_threaded;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index d72d6e5aa2002..8cd8a9604f33f 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -88,6 +88,7 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
+bool acpi_cpu_is_threaded(int cpu);
void freq_inv_set_max_ratio(int cpu, u64 max_rate);
#endif
--
2.39.5
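Both acpi_cpu_is_threaded() and parse_acpi_topology() remain __weak in
drivers/base/arch_topology.c, so an architecture can still supply its own
behaviour by providing a strong definition with the same signature. A
hypothetical sketch of such an override (illustration only, not part of this
patch; the file path and body are assumptions):

/* arch/<arch>/kernel/topology.c -- hypothetical override, not in this patch */
#include <linux/arch_topology.h>
#include <linux/init.h>

/*
 * A non-weak definition with the same signature takes precedence over the
 * __weak fallback in drivers/base/arch_topology.c at link time.
 */
int __init parse_acpi_topology(void)
{
        /* arch-specific ACPI topology parsing would go here */
        return 0;
}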
On 2025/9/18 9:48, Yunhui Cui wrote:

[...]

> +__weak bool __init acpi_cpu_is_threaded(int cpu)
> +{
> +	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
> +
> +	return !!is_threaded;
> +}
> +

You seem to have missed the comment on this from v1: acpi_pptt_cpu_is_thread()
may return -ENOENT, in which case the CPU shouldn't be considered threaded?

thanks.
Hi Yicong,

On Thu, Sep 18, 2025 at 5:23 PM Yicong Yang <yangyicong@huawei.com> wrote:
>
> On 2025/9/18 9:48, Yunhui Cui wrote:

[...]

> > +__weak bool __init acpi_cpu_is_threaded(int cpu)
> > +{
> > +	int is_threaded = acpi_pptt_cpu_is_thread(cpu);
> > +
> > +	return !!is_threaded;
> > +}
> > +
>
> You seem to have missed the comment on this from v1: acpi_pptt_cpu_is_thread()
> may return -ENOENT, in which case the CPU shouldn't be considered threaded?

Oh, got it, okay.

>
> thanks.

Thanks,
Yunhui
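For reference, a minimal sketch of the generic fallback with a negative return
from acpi_pptt_cpu_is_thread() (e.g. -ENOENT when the PPTT carries no such
information) treated as "not threaded". This is only one way the respin could
address the comment above, not the actual v3 change:

__weak bool __init acpi_cpu_is_threaded(int cpu)
{
        int is_threaded = acpi_pptt_cpu_is_thread(cpu);

        /*
         * acpi_pptt_cpu_is_thread() returns a negative errno (e.g. -ENOENT)
         * when the PPTT does not describe the CPU; with no arch-specific
         * fallback available here, treat that case as "not threaded".
         */
        if (is_threaded < 0)
                is_threaded = 0;

        return !!is_threaded;
}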
On Thu, Sep 18, 2025 at 09:48:28AM +0800, Yunhui Cui wrote:
> Currently, RISC-V lacks arch-specific registers for CPU topology
> properties and must get them from ACPI. Thus, parse_acpi_topology()
> is moved from arm64/ to drivers/ for RISC-V reuse.
>
> Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
> ---
>  arch/arm64/kernel/topology.c  | 87 +---------------------------------
>  drivers/base/arch_topology.c  | 89 ++++++++++++++++++++++++++++++++++-
>  include/linux/arch_topology.h |  1 +
>  3 files changed, 90 insertions(+), 87 deletions(-)
>

Hi,

This is the friendly patch-bot of Greg Kroah-Hartman. You have sent him a
patch that has triggered this response. He used to manually respond to these
common problems, but in order to save his sanity (he kept writing the same
thing over and over, yet to different people), I was created. Hopefully you
will not take offence and will fix the problem in your patch and resubmit it
so that it can be accepted into the Linux kernel tree.

You are receiving this message because of the following common error(s) as
indicated below:

- This looks like a new version of a previously submitted patch, but you did
  not list below the --- line any changes from the previous version. Please
  read the section entitled "The canonical patch format" in the kernel file,
  Documentation/process/submitting-patches.rst for what needs to be done here
  to properly describe this.

If you wish to discuss this problem further, or you have questions about how
to resolve this issue, please feel free to respond to this email and Greg will
reply once he has dug out from the pending patches received from other
developers.

thanks,

greg k-h's patch email bot
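On the bot's point: per Documentation/process/submitting-patches.rst,
version-to-version notes belong below the "---" line so that they are
discarded when the patch is applied. A hypothetical example of how the next
respin could be laid out (the listed change is illustrative, not the author's
actual changelog):

Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
---
Changes in v3:
 - address review comments from v2

 arch/arm64/kernel/topology.c  | 87 +---------------------------------
 drivers/base/arch_topology.c  | 89 ++++++++++++++++++++++++++++++++++-
 include/linux/arch_topology.h |  1 +
 3 files changed, 90 insertions(+), 87 deletions(-)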