[XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common

Posted by Wei Chen 4 years, 6 months ago
After the preparations in the previous patches, numa_scan_nodes can be
used by both Arm and x86, so move this function from x86 to common code.
As nodes_cover_memory will no longer be used across files, restore its
static attribute in this patch.

Signed-off-by: Wei Chen <wei.chen@arm.com>
---
 xen/arch/x86/srat.c        | 52 ------------------------------------
 xen/common/numa.c          | 54 +++++++++++++++++++++++++++++++++++++-
 xen/include/asm-x86/acpi.h |  3 ---
 xen/include/xen/numa.h     |  3 ++-
 4 files changed, 55 insertions(+), 57 deletions(-)

diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index c979939fdd..c9f019c307 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -361,58 +361,6 @@ void __init srat_parse_regions(u64 addr)
 	pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
 }
 
-/* Use the information discovered above to actually set up the nodes. */
-int __init numa_scan_nodes(u64 start, u64 end)
-{
-	int i;
-	nodemask_t all_nodes_parsed;
-
-	/* First clean up the node list */
-	for (i = 0; i < MAX_NUMNODES; i++)
-		cutoff_node(i, start, end);
-
-#ifdef CONFIG_ACPI_NUMA
-	if (acpi_numa <= 0)
-		return -1;
-#endif
-
-	if (!nodes_cover_memory()) {
-		bad_srat();
-		return -1;
-	}
-
-	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
-				memblk_nodeid);
-
-	if (memnode_shift < 0) {
-		printk(KERN_ERR
-		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
-		bad_srat();
-		return -1;
-	}
-
-	nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
-
-	/* Finally register nodes */
-	for_each_node_mask(i, all_nodes_parsed)
-	{
-		u64 size = nodes[i].end - nodes[i].start;
-		if ( size == 0 )
-			printk(KERN_WARNING "SRAT: Node %u has no memory. "
-			       "BIOS Bug or mis-configured hardware?\n", i);
-
-		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-	}
-	for (i = 0; i < nr_cpu_ids; i++) {
-		if (cpu_to_node[i] == NUMA_NO_NODE)
-			continue;
-		if (!nodemask_test(cpu_to_node[i], &processor_nodes_parsed))
-			numa_set_node(i, NUMA_NO_NODE);
-	}
-	numa_init_array();
-	return 0;
-}
-
 static unsigned node_to_pxm(nodeid_t n)
 {
 	unsigned i;
diff --git a/xen/common/numa.c b/xen/common/numa.c
index 4152bbe83b..8ca13e27d1 100644
--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -195,7 +195,7 @@ void __init cutoff_node(int i, u64 start, u64 end)
 
 /* Sanity check to catch more bad SRATs (they are amazingly common).
    Make sure the PXMs cover all memory. */
-int __init nodes_cover_memory(void)
+static int __init nodes_cover_memory(void)
 {
 	int i;
 	uint32_t nr_banks = arch_meminfo_get_nr_bank();
@@ -271,6 +271,58 @@ void __init numa_init_array(void)
     }
 }
 
+/* Use the information discovered above to actually set up the nodes. */
+int __init numa_scan_nodes(u64 start, u64 end)
+{
+	int i;
+	nodemask_t all_nodes_parsed;
+
+	/* First clean up the node list */
+	for (i = 0; i < MAX_NUMNODES; i++)
+		cutoff_node(i, start, end);
+
+#ifdef CONFIG_ACPI_NUMA
+	if (acpi_numa <= 0)
+		return -1;
+#endif
+
+	if (!nodes_cover_memory()) {
+		bad_srat();
+		return -1;
+	}
+
+	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
+				memblk_nodeid);
+
+	if (memnode_shift < 0) {
+		printk(KERN_ERR
+		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
+		bad_srat();
+		return -1;
+	}
+
+	nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
+
+	/* Finally register nodes */
+	for_each_node_mask(i, all_nodes_parsed)
+	{
+		u64 size = nodes[i].end - nodes[i].start;
+		if ( size == 0 )
+			printk(KERN_WARNING "SRAT: Node %u has no memory. "
+			       "BIOS Bug or mis-configured hardware?\n", i);
+
+		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+	}
+	for (i = 0; i < nr_cpu_ids; i++) {
+		if (cpu_to_node[i] == NUMA_NO_NODE)
+			continue;
+		if (!nodemask_test(cpu_to_node[i], &processor_nodes_parsed))
+			numa_set_node(i, NUMA_NO_NODE);
+	}
+	numa_init_array();
+	return 0;
+}
+
 #ifdef CONFIG_NUMA_EMU
 int numa_fake __initdata = 0;
 
diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h
index 33b71dfb3b..2140461ff3 100644
--- a/xen/include/asm-x86/acpi.h
+++ b/xen/include/asm-x86/acpi.h
@@ -101,9 +101,6 @@ extern unsigned long acpi_wakeup_address;
 
 #define ARCH_HAS_POWER_INIT	1
 
-extern s8 acpi_numa;
-extern int numa_scan_nodes(u64 start, u64 end);
-
 extern struct acpi_sleep_info acpi_sinfo;
 #define acpi_video_flags bootsym(video_flags)
 struct xenpf_enter_acpi_sleep;
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index 490381bd13..b9b5d1ad88 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -81,8 +81,10 @@ extern void bad_srat(void);
 extern void numa_init_array(void);
 extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
 extern void numa_set_node(int cpu, nodeid_t node);
+extern int numa_scan_nodes(u64 start, u64 end);
 extern bool numa_off;
 extern int numa_fake;
+extern s8 acpi_numa;
 
 extern void setup_node_bootmem(nodeid_t nodeid, u64 start, u64 end);
 
@@ -94,7 +96,6 @@ static inline void clear_node_cpumask(int cpu)
 extern uint32_t arch_meminfo_get_nr_bank(void);
 extern int arch_meminfo_get_ram_bank_range(int bank,
     unsigned long long *start, unsigned long long *end);
-extern int nodes_cover_memory(void);
 
 #endif /* CONFIG_NUMA */
 
-- 
2.25.1


Re: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common
Posted by Julien Grall 4 years, 5 months ago
Hi Wei,

On 11/08/2021 11:24, Wei Chen wrote:
> diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h
> index 33b71dfb3b..2140461ff3 100644
> --- a/xen/include/asm-x86/acpi.h
> +++ b/xen/include/asm-x86/acpi.h
> @@ -101,9 +101,6 @@ extern unsigned long acpi_wakeup_address;
>   
>   #define ARCH_HAS_POWER_INIT	1
>   
> -extern s8 acpi_numa;
> -extern int numa_scan_nodes(u64 start, u64 end);
> -
>   extern struct acpi_sleep_info acpi_sinfo;
>   #define acpi_video_flags bootsym(video_flags)
>   struct xenpf_enter_acpi_sleep;
> diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
> index 490381bd13..b9b5d1ad88 100644
> --- a/xen/include/xen/numa.h
> +++ b/xen/include/xen/numa.h
> @@ -81,8 +81,10 @@ extern void bad_srat(void);
>   extern void numa_init_array(void);
>   extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
>   extern void numa_set_node(int cpu, nodeid_t node);
> +extern int numa_scan_nodes(u64 start, u64 end);

AFAICT, by the end of the series, the function is only called by the 
common code. So this should be static.
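
Something like the below is what I have in mind (just a sketch, assuming
the only remaining caller ends up in xen/common/numa.c by the end of the
series):

  --- a/xen/include/xen/numa.h
  +++ b/xen/include/xen/numa.h
  -extern int numa_scan_nodes(u64 start, u64 end);

  --- a/xen/common/numa.c
  +++ b/xen/common/numa.c
  -int __init numa_scan_nodes(u64 start, u64 end)
  +static int __init numa_scan_nodes(u64 start, u64 end)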

Cheers,

-- 
Julien Grall

RE: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common
Posted by Wei Chen 4 years, 5 months ago
Hi Julien,

> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: 27 August 2021 22:14
> To: Wei Chen <Wei.Chen@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org; jbeulich@suse.com
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>
> Subject: Re: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to
> common
> 
> Hi Wei,
> 
> On 11/08/2021 11:24, Wei Chen wrote:
> > diff --git a/xen/include/asm-x86/acpi.h b/xen/include/asm-x86/acpi.h
> > index 33b71dfb3b..2140461ff3 100644
> > --- a/xen/include/asm-x86/acpi.h
> > +++ b/xen/include/asm-x86/acpi.h
> > @@ -101,9 +101,6 @@ extern unsigned long acpi_wakeup_address;
> >
> >   #define ARCH_HAS_POWER_INIT	1
> >
> > -extern s8 acpi_numa;
> > -extern int numa_scan_nodes(u64 start, u64 end);
> > -
> >   extern struct acpi_sleep_info acpi_sinfo;
> >   #define acpi_video_flags bootsym(video_flags)
> >   struct xenpf_enter_acpi_sleep;
> > diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
> > index 490381bd13..b9b5d1ad88 100644
> > --- a/xen/include/xen/numa.h
> > +++ b/xen/include/xen/numa.h
> > @@ -81,8 +81,10 @@ extern void bad_srat(void);
> >   extern void numa_init_array(void);
> >   extern void numa_initmem_init(unsigned long start_pfn, unsigned long
> end_pfn);
> >   extern void numa_set_node(int cpu, nodeid_t node);
> > +extern int numa_scan_nodes(u64 start, u64 end);
> 
> AFAICT, by the end of the series, the function is only called by the
> common code. So this should be static.
> 

OK

> Cheers,
> 
> --
> Julien Grall
Re: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common
Posted by Stefano Stabellini 4 years, 5 months ago
On Wed, 11 Aug 2021, Wei Chen wrote:
> After the preparations in the previous patches, numa_scan_nodes can be
> used by both Arm and x86, so move this function from x86 to common code.
> As nodes_cover_memory will no longer be used across files, restore its
> static attribute in this patch.
> 
> Signed-off-by: Wei Chen <wei.chen@arm.com>
> ---
>  xen/arch/x86/srat.c        | 52 ------------------------------------
>  xen/common/numa.c          | 54 +++++++++++++++++++++++++++++++++++++-
>  xen/include/asm-x86/acpi.h |  3 ---
>  xen/include/xen/numa.h     |  3 ++-
>  4 files changed, 55 insertions(+), 57 deletions(-)
> 
> diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
> index c979939fdd..c9f019c307 100644
> --- a/xen/arch/x86/srat.c
> +++ b/xen/arch/x86/srat.c
> @@ -361,58 +361,6 @@ void __init srat_parse_regions(u64 addr)
>  	pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
>  }
>  
> -/* Use the information discovered above to actually set up the nodes. */
> -int __init numa_scan_nodes(u64 start, u64 end)
> -{
> -	int i;
> -	nodemask_t all_nodes_parsed;
> -
> -	/* First clean up the node list */
> -	for (i = 0; i < MAX_NUMNODES; i++)
> -		cutoff_node(i, start, end);
> -
> -#ifdef CONFIG_ACPI_NUMA
> -	if (acpi_numa <= 0)
> -		return -1;
> -#endif
> -
> -	if (!nodes_cover_memory()) {
> -		bad_srat();
> -		return -1;
> -	}
> -
> -	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
> -				memblk_nodeid);
> -
> -	if (memnode_shift < 0) {
> -		printk(KERN_ERR
> -		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
> -		bad_srat();
> -		return -1;
> -	}
> -
> -	nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
> -
> -	/* Finally register nodes */
> -	for_each_node_mask(i, all_nodes_parsed)
> -	{
> -		u64 size = nodes[i].end - nodes[i].start;
> -		if ( size == 0 )
> -			printk(KERN_WARNING "SRAT: Node %u has no memory. "
> -			       "BIOS Bug or mis-configured hardware?\n", i);
> -
> -		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
> -	}
> -	for (i = 0; i < nr_cpu_ids; i++) {
> -		if (cpu_to_node[i] == NUMA_NO_NODE)
> -			continue;
> -		if (!nodemask_test(cpu_to_node[i], &processor_nodes_parsed))
> -			numa_set_node(i, NUMA_NO_NODE);
> -	}
> -	numa_init_array();
> -	return 0;
> -}
> -
>  static unsigned node_to_pxm(nodeid_t n)
>  {
>  	unsigned i;
> diff --git a/xen/common/numa.c b/xen/common/numa.c
> index 4152bbe83b..8ca13e27d1 100644
> --- a/xen/common/numa.c
> +++ b/xen/common/numa.c
> @@ -195,7 +195,7 @@ void __init cutoff_node(int i, u64 start, u64 end)
>  
>  /* Sanity check to catch more bad SRATs (they are amazingly common).
>     Make sure the PXMs cover all memory. */
> -int __init nodes_cover_memory(void)
> +static int __init nodes_cover_memory(void)
>  {
>  	int i;
>  	uint32_t nr_banks = arch_meminfo_get_nr_bank();
> @@ -271,6 +271,58 @@ void __init numa_init_array(void)
>      }
>  }
>  
> +/* Use the information discovered above to actually set up the nodes. */
> +int __init numa_scan_nodes(u64 start, u64 end)
> +{
> +	int i;
> +	nodemask_t all_nodes_parsed;
> +
> +	/* First clean up the node list */
> +	for (i = 0; i < MAX_NUMNODES; i++)
> +		cutoff_node(i, start, end);
> +
> +#ifdef CONFIG_ACPI_NUMA
> +	if (acpi_numa <= 0)
> +		return -1;
> +#endif
> +
> +	if (!nodes_cover_memory()) {
> +		bad_srat();
> +		return -1;
> +	}
> +
> +	memnode_shift = compute_hash_shift(node_memblk_range, num_node_memblks,
> +				memblk_nodeid);
> +
> +	if (memnode_shift < 0) {
> +		printk(KERN_ERR
> +		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
> +		bad_srat();
> +		return -1;
> +	}
> +
> +	nodes_or(all_nodes_parsed, memory_nodes_parsed, processor_nodes_parsed);
> +
> +	/* Finally register nodes */
> +	for_each_node_mask(i, all_nodes_parsed)
> +	{
> +		u64 size = nodes[i].end - nodes[i].start;
> +		if ( size == 0 )
> +			printk(KERN_WARNING "SRAT: Node %u has no memory. "
> +			       "BIOS Bug or mis-configured hardware?\n", i);

Not all archs have a BIOS so I'd say "firmware bug". Like last time, we
usually don't do code changes together with code movement, but in this
case it might be OK. I am also happy with a separate patch to adjust the
two comments (this one and the one in the previous patch).
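
E.g. something along these lines (only illustrating the wording, the exact
phrasing is up to you):

		if ( size == 0 )
			printk(KERN_WARNING "SRAT: Node %u has no memory. "
			       "Firmware bug or mis-configured hardware?\n", i);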

RE: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to common
Posted by Wei Chen 4 years, 5 months ago
Hi Stefano,

> -----Original Message-----
> From: Stefano Stabellini <sstabellini@kernel.org>
> Sent: 31 August 2021 9:27
> To: Wei Chen <Wei.Chen@arm.com>
> Cc: xen-devel@lists.xenproject.org; sstabellini@kernel.org; julien@xen.org;
> jbeulich@suse.com; Bertrand Marquis <Bertrand.Marquis@arm.com>
> Subject: Re: [XEN RFC PATCH 34/40] xen: move numa_scan_nodes from x86 to
> common
> 
> On Wed, 11 Aug 2021, Wei Chen wrote:
> > After the preparations in the previous patches, numa_scan_nodes can be
> > used by both Arm and x86, so move this function from x86 to common code.
> > As nodes_cover_memory will no longer be used across files, restore its
> > static attribute in this patch.
> >
> > Signed-off-by: Wei Chen <wei.chen@arm.com>
> > ---
> >  xen/arch/x86/srat.c        | 52 ------------------------------------
> >  xen/common/numa.c          | 54 +++++++++++++++++++++++++++++++++++++-
> >  xen/include/asm-x86/acpi.h |  3 ---
> >  xen/include/xen/numa.h     |  3 ++-
> >  4 files changed, 55 insertions(+), 57 deletions(-)
> >
> > diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
> > index c979939fdd..c9f019c307 100644
> > --- a/xen/arch/x86/srat.c
> > +++ b/xen/arch/x86/srat.c
> > @@ -361,58 +361,6 @@ void __init srat_parse_regions(u64 addr)
> >  	pfn_pdx_hole_setup(mask >> PAGE_SHIFT);
> >  }
> >
> > -/* Use the information discovered above to actually set up the nodes.
> */
> > -int __init numa_scan_nodes(u64 start, u64 end)
> > -{
> > -	int i;
> > -	nodemask_t all_nodes_parsed;
> > -
> > -	/* First clean up the node list */
> > -	for (i = 0; i < MAX_NUMNODES; i++)
> > -		cutoff_node(i, start, end);
> > -
> > -#ifdef CONFIG_ACPI_NUMA
> > -	if (acpi_numa <= 0)
> > -		return -1;
> > -#endif
> > -
> > -	if (!nodes_cover_memory()) {
> > -		bad_srat();
> > -		return -1;
> > -	}
> > -
> > -	memnode_shift = compute_hash_shift(node_memblk_range,
> num_node_memblks,
> > -				memblk_nodeid);
> > -
> > -	if (memnode_shift < 0) {
> > -		printk(KERN_ERR
> > -		     "SRAT: No NUMA node hash function found. Contact
> maintainer\n");
> > -		bad_srat();
> > -		return -1;
> > -	}
> > -
> > -	nodes_or(all_nodes_parsed, memory_nodes_parsed,
> processor_nodes_parsed);
> > -
> > -	/* Finally register nodes */
> > -	for_each_node_mask(i, all_nodes_parsed)
> > -	{
> > -		u64 size = nodes[i].end - nodes[i].start;
> > -		if ( size == 0 )
> > -			printk(KERN_WARNING "SRAT: Node %u has no memory. "
> > -			       "BIOS Bug or mis-configured hardware?\n", i);
> > -
> > -		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
> > -	}
> > -	for (i = 0; i < nr_cpu_ids; i++) {
> > -		if (cpu_to_node[i] == NUMA_NO_NODE)
> > -			continue;
> > -		if (!nodemask_test(cpu_to_node[i], &processor_nodes_parsed))
> > -			numa_set_node(i, NUMA_NO_NODE);
> > -	}
> > -	numa_init_array();
> > -	return 0;
> > -}
> > -
> >  static unsigned node_to_pxm(nodeid_t n)
> >  {
> >  	unsigned i;
> > diff --git a/xen/common/numa.c b/xen/common/numa.c
> > index 4152bbe83b..8ca13e27d1 100644
> > --- a/xen/common/numa.c
> > +++ b/xen/common/numa.c
> > @@ -195,7 +195,7 @@ void __init cutoff_node(int i, u64 start, u64 end)
> >
> >  /* Sanity check to catch more bad SRATs (they are amazingly common).
> >     Make sure the PXMs cover all memory. */
> > -int __init nodes_cover_memory(void)
> > +static int __init nodes_cover_memory(void)
> >  {
> >  	int i;
> >  	uint32_t nr_banks = arch_meminfo_get_nr_bank();
> > @@ -271,6 +271,58 @@ void __init numa_init_array(void)
> >      }
> >  }
> >
> > +/* Use the information discovered above to actually set up the nodes.
> */
> > +int __init numa_scan_nodes(u64 start, u64 end)
> > +{
> > +	int i;
> > +	nodemask_t all_nodes_parsed;
> > +
> > +	/* First clean up the node list */
> > +	for (i = 0; i < MAX_NUMNODES; i++)
> > +		cutoff_node(i, start, end);
> > +
> > +#ifdef CONFIG_ACPI_NUMA
> > +	if (acpi_numa <= 0)
> > +		return -1;
> > +#endif
> > +
> > +	if (!nodes_cover_memory()) {
> > +		bad_srat();
> > +		return -1;
> > +	}
> > +
> > +	memnode_shift = compute_hash_shift(node_memblk_range,
> num_node_memblks,
> > +				memblk_nodeid);
> > +
> > +	if (memnode_shift < 0) {
> > +		printk(KERN_ERR
> > +		     "SRAT: No NUMA node hash function found. Contact
> maintainer\n");
> > +		bad_srat();
> > +		return -1;
> > +	}
> > +
> > +	nodes_or(all_nodes_parsed, memory_nodes_parsed,
> processor_nodes_parsed);
> > +
> > +	/* Finally register nodes */
> > +	for_each_node_mask(i, all_nodes_parsed)
> > +	{
> > +		u64 size = nodes[i].end - nodes[i].start;
> > +		if ( size == 0 )
> > +			printk(KERN_WARNING "SRAT: Node %u has no memory. "
> > +			       "BIOS Bug or mis-configured hardware?\n", i);
> 
> Not all archs have a BIOS so I'd say "firmware bug". Like last time, we
> usually don't do code changes together with code movement, but in this
> case it might be OK. I am also happy with a separate patch to adjust the
> two comments (this one and the one in the previous patch).

OK, I will do it.