[PATCH v3 1/2] sched: Create architecture specific sched domain distances

Posted by Tim Chen 3 weeks ago
Allow architectures to define sched domain NUMA distances that are
derived from, but may differ from, the NUMA node distances, for the
purpose of building NUMA sched domains.

The actual NUMA node distances are kept separately.  This allows an
architecture to modify the NUMA domain levels that are used when its
sched domains are built.

Consolidate the recording of unique NUMA distances in an array into
sched_record_numa_dist() so the function can be reused to record NUMA
distances when the NUMA distance metric is changed.

No functional change if no arch specific NUMA distances are defined.
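
As an illustration (not part of this patch), an architecture could
override the weak hook to collapse all remote distances into a single
level; node_distance() and REMOTE_DISTANCE are existing helpers, the
policy shown is purely hypothetical:

	int arch_sched_node_distance(int from, int to)
	{
		int d = node_distance(from, to);

		/*
		 * Hypothetical policy: treat every remote node as
		 * equidistant, so only one remote NUMA sched domain
		 * level is built.
		 */
		return d >= REMOTE_DISTANCE ? REMOTE_DISTANCE : d;
	}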

Co-developed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 include/linux/sched/topology.h |   2 +
 kernel/sched/topology.c        | 114 ++++++++++++++++++++++++++++-----
 2 files changed, 99 insertions(+), 17 deletions(-)

diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 5263746b63e8..4f58e78ca52e 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -59,6 +59,8 @@ static inline int cpu_numa_flags(void)
 #endif
 
 extern int arch_asym_cpu_priority(int cpu);
+extern int arch_sched_node_distance(int from, int to);
+extern int sched_avg_remote_numa_distance;
 
 struct sched_domain_attr {
 	int relax_domain_level;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 977e133bb8a4..6c0ff62322cb 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1591,10 +1591,13 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 enum numa_topology_type sched_numa_topology_type;
 
 static int			sched_domains_numa_levels;
+static int			sched_numa_node_levels;
 static int			sched_domains_curr_level;
 
 int				sched_max_numa_distance;
+int				sched_avg_remote_numa_distance;
 static int			*sched_domains_numa_distance;
+static int			*sched_numa_node_distance;
 static struct cpumask		***sched_domains_numa_masks;
 #endif /* CONFIG_NUMA */
 
@@ -1808,10 +1811,10 @@ bool find_numa_distance(int distance)
 		return true;
 
 	rcu_read_lock();
-	distances = rcu_dereference(sched_domains_numa_distance);
+	distances = rcu_dereference(sched_numa_node_distance);
 	if (!distances)
 		goto unlock;
-	for (i = 0; i < sched_domains_numa_levels; i++) {
+	for (i = 0; i < sched_numa_node_levels; i++) {
 		if (distances[i] == distance) {
 			found = true;
 			break;
@@ -1887,14 +1890,32 @@ static void init_numa_topology_type(int offline_node)
 
 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
 
-void sched_init_numa(int offline_node)
+/*
+ * Architecture could modify NUMA distance, to change
+ * grouping of NUMA nodes and number of NUMA levels when creating
+ * NUMA level sched domains.
+ *
+ * One NUMA level is created for each unique
+ * arch_sched_node_distance.
+ */
+int __weak arch_sched_node_distance(int from, int to)
+{
+	return node_distance(from, to);
+}
+
+static int numa_node_dist(int i, int j)
+{
+	return node_distance(i, j);
+}
+
+static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
+		int **dist, int *levels)
+
 {
-	struct sched_domain_topology_level *tl;
 	unsigned long *distance_map;
 	int nr_levels = 0;
 	int i, j;
 	int *distances;
-	struct cpumask ***masks;
 
 	/*
 	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
@@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
 	 */
 	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
 	if (!distance_map)
-		return;
+		return -ENOMEM;
 
 	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
 	for_each_cpu_node_but(i, offline_node) {
 		for_each_cpu_node_but(j, offline_node) {
-			int distance = node_distance(i, j);
+			int distance = n_dist(i, j);
 
 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
 				sched_numa_warn("Invalid distance value range");
 				bitmap_free(distance_map);
-				return;
+				return -EINVAL;
 			}
 
 			bitmap_set(distance_map, distance, 1);
@@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
 	if (!distances) {
 		bitmap_free(distance_map);
-		return;
+		return -ENOMEM;
 	}
-
 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
 		distances[i] = j;
 	}
-	rcu_assign_pointer(sched_domains_numa_distance, distances);
+	*dist = distances;
+	*levels = nr_levels;
 
 	bitmap_free(distance_map);
 
+	return 0;
+}
+
+static int avg_remote_numa_distance(int offline_node)
+{
+	int i, j;
+	int distance, nr_remote = 0, total_distance = 0;
+
+	for_each_cpu_node_but(i, offline_node) {
+		for_each_cpu_node_but(j, offline_node) {
+			distance = node_distance(i, j);
+
+			if (distance >= REMOTE_DISTANCE) {
+				nr_remote++;
+				total_distance += distance;
+			}
+		}
+	}
+	if (nr_remote)
+		return total_distance / nr_remote;
+	else
+		return REMOTE_DISTANCE;
+}
+
+void sched_init_numa(int offline_node)
+{
+	struct sched_domain_topology_level *tl;
+	int nr_levels, nr_node_levels;
+	int i, j;
+	int *distances, *domain_distances;
+	struct cpumask ***masks;
+
+	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
+				   &nr_node_levels))
+		return;
+
+	WRITE_ONCE(sched_avg_remote_numa_distance,
+		   avg_remote_numa_distance(offline_node));
+
+	if (sched_record_numa_dist(offline_node,
+				   arch_sched_node_distance, &domain_distances,
+				   &nr_levels)) {
+		kfree(distances);
+		return;
+	}
+	rcu_assign_pointer(sched_numa_node_distance, distances);
+	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
+	WRITE_ONCE(sched_numa_node_levels, nr_node_levels);
+
 	/*
 	 * 'nr_levels' contains the number of unique distances
 	 *
@@ -1954,6 +2024,8 @@ void sched_init_numa(int offline_node)
 	 *
 	 * We reset it to 'nr_levels' at the end of this function.
 	 */
+	rcu_assign_pointer(sched_domains_numa_distance, domain_distances);
+
 	sched_domains_numa_levels = 0;
 
 	masks = kzalloc(sizeof(void *) * nr_levels, GFP_KERNEL);
@@ -1979,10 +2051,13 @@ void sched_init_numa(int offline_node)
 			masks[i][j] = mask;
 
 			for_each_cpu_node_but(k, offline_node) {
-				if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
+				if (sched_debug() &&
+				    (arch_sched_node_distance(j, k) !=
+				     arch_sched_node_distance(k, j)))
 					sched_numa_warn("Node-distance not symmetric");
 
-				if (node_distance(j, k) > sched_domains_numa_distance[i])
+				if (arch_sched_node_distance(j, k) >
+				    sched_domains_numa_distance[i])
 					continue;
 
 				cpumask_or(mask, mask, cpumask_of_node(k));
@@ -2022,7 +2097,6 @@ void sched_init_numa(int offline_node)
 	sched_domain_topology = tl;
 
 	sched_domains_numa_levels = nr_levels;
-	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
 
 	init_numa_topology_type(offline_node);
 }
@@ -2030,14 +2104,18 @@ void sched_init_numa(int offline_node)
 
 static void sched_reset_numa(void)
 {
-	int nr_levels, *distances;
+	int nr_levels, *distances, *dom_distances;
 	struct cpumask ***masks;
 
 	nr_levels = sched_domains_numa_levels;
+	sched_numa_node_levels = 0;
 	sched_domains_numa_levels = 0;
 	sched_max_numa_distance = 0;
+	sched_avg_remote_numa_distance = 0;
 	sched_numa_topology_type = NUMA_DIRECT;
-	distances = sched_domains_numa_distance;
+	distances = sched_numa_node_distance;
+	dom_distances = sched_domains_numa_distance;
+	rcu_assign_pointer(sched_numa_node_distance, NULL);
 	rcu_assign_pointer(sched_domains_numa_distance, NULL);
 	masks = sched_domains_numa_masks;
 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
@@ -2054,6 +2132,7 @@ static void sched_reset_numa(void)
 			kfree(masks[i]);
 		}
 		kfree(masks);
+		kfree(dom_distances);
 	}
 	if (sched_domain_topology_saved) {
 		kfree(sched_domain_topology);
@@ -2092,7 +2171,8 @@ void sched_domains_numa_masks_set(unsigned int cpu)
 				continue;
 
 			/* Set ourselves in the remote node's masks */
-			if (node_distance(j, node) <= sched_domains_numa_distance[i])
+			if (arch_sched_node_distance(j, node) <=
+			    sched_domains_numa_distance[i])
 				cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
 		}
 	}
-- 
2.32.0
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Peter Zijlstra 2 weeks, 3 days ago
On Thu, Sep 11, 2025 at 11:30:56AM -0700, Tim Chen wrote:
> Allow architecture specific sched domain NUMA distances that can be
> modified from NUMA node distances for the purpose of building NUMA
> sched domains.
> 
> The actual NUMA distances are kept separately.  This allows for NUMA
> domain levels modification when building sched domains for specific
> architectures.
> 
> Consolidate the recording of unique NUMA distances in an array to
> sched_record_numa_dist() so the function can be reused to record NUMA
> distances when the NUMA distance metric is changed.
> 
> No functional change if there's no arch specific NUMA distances
> are being defined.

Keeping both metrics side-by-side is confusing -- and not very well
justified by the above.

Is there any appreciable benefit to mixing the two like this?

> 
> Co-developed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
> Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
>  include/linux/sched/topology.h |   2 +
>  kernel/sched/topology.c        | 114 ++++++++++++++++++++++++++++-----
>  2 files changed, 99 insertions(+), 17 deletions(-)
> 
> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
> index 5263746b63e8..4f58e78ca52e 100644
> --- a/include/linux/sched/topology.h
> +++ b/include/linux/sched/topology.h
> @@ -59,6 +59,8 @@ static inline int cpu_numa_flags(void)
>  #endif
>  
>  extern int arch_asym_cpu_priority(int cpu);
> +extern int arch_sched_node_distance(int from, int to);
> +extern int sched_avg_remote_numa_distance;
>  
>  struct sched_domain_attr {
>  	int relax_domain_level;
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 977e133bb8a4..6c0ff62322cb 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1591,10 +1591,13 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
>  enum numa_topology_type sched_numa_topology_type;
>  
>  static int			sched_domains_numa_levels;
> +static int			sched_numa_node_levels;
>  static int			sched_domains_curr_level;
>  
>  int				sched_max_numa_distance;
> +int				sched_avg_remote_numa_distance;
>  static int			*sched_domains_numa_distance;
> +static int			*sched_numa_node_distance;
>  static struct cpumask		***sched_domains_numa_masks;
>  #endif /* CONFIG_NUMA */
>  
> @@ -1808,10 +1811,10 @@ bool find_numa_distance(int distance)
>  		return true;
>  
>  	rcu_read_lock();
> -	distances = rcu_dereference(sched_domains_numa_distance);
> +	distances = rcu_dereference(sched_numa_node_distance);
>  	if (!distances)
>  		goto unlock;
> -	for (i = 0; i < sched_domains_numa_levels; i++) {
> +	for (i = 0; i < sched_numa_node_levels; i++) {
>  		if (distances[i] == distance) {
>  			found = true;
>  			break;

I'm assuming (because it's not actually stated anywhere) that
sched_numa_$FOO is based on the SLIT table, while sched_domain_$FOO is
the modified thing.

And you're saying it makes a significant difference to
preferred_group_nid()?

> +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> +		int **dist, int *levels)
> +

That's a coding style fail; use cino=(0:0.
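
(These are vim cinoptions: '(0' aligns continuation lines with the
first character after the unclosed parenthesis, and ':0' keeps case
labels level with their switch.)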

>  {
> -	struct sched_domain_topology_level *tl;
>  	unsigned long *distance_map;
>  	int nr_levels = 0;
>  	int i, j;
>  	int *distances;
> -	struct cpumask ***masks;
>  
>  	/*
>  	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
>  	 */
>  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
>  	if (!distance_map)
> -		return;
> +		return -ENOMEM;
>  
>  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
>  	for_each_cpu_node_but(i, offline_node) {
>  		for_each_cpu_node_but(j, offline_node) {
> -			int distance = node_distance(i, j);
> +			int distance = n_dist(i, j);
>  
>  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
>  				sched_numa_warn("Invalid distance value range");
>  				bitmap_free(distance_map);
> -				return;
> +				return -EINVAL;
>  			}
>  
>  			bitmap_set(distance_map, distance, 1);
> @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
>  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
>  	if (!distances) {
>  		bitmap_free(distance_map);
> -		return;
> +		return -ENOMEM;
>  	}
> -
>  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
>  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
>  		distances[i] = j;
>  	}
> -	rcu_assign_pointer(sched_domains_numa_distance, distances);
> +	*dist = distances;
> +	*levels = nr_levels;
>  
>  	bitmap_free(distance_map);
>  
> +	return 0;
> +}
> +
> +static int avg_remote_numa_distance(int offline_node)
> +{
> +	int i, j;
> +	int distance, nr_remote = 0, total_distance = 0;
> +
> +	for_each_cpu_node_but(i, offline_node) {
> +		for_each_cpu_node_but(j, offline_node) {
> +			distance = node_distance(i, j);
> +
> +			if (distance >= REMOTE_DISTANCE) {
> +				nr_remote++;
> +				total_distance += distance;
> +			}
> +		}
> +	}
> +	if (nr_remote)
> +		return total_distance / nr_remote;
> +	else
> +		return REMOTE_DISTANCE;
> +}
> +
> +void sched_init_numa(int offline_node)
> +{
> +	struct sched_domain_topology_level *tl;
> +	int nr_levels, nr_node_levels;
> +	int i, j;
> +	int *distances, *domain_distances;
> +	struct cpumask ***masks;
> +
> +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> +				   &nr_node_levels))
> +		return;
> +
> +	WRITE_ONCE(sched_avg_remote_numa_distance,
> +		   avg_remote_numa_distance(offline_node));

What is the point of all this? sched_avg_remote_numa_distance isn't
actually used anywhere. I'm thinking it doesn't want to be in this patch
at the very least.
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Tim Chen 2 weeks, 3 days ago
On Mon, 2025-09-15 at 14:37 +0200, Peter Zijlstra wrote:
> On Thu, Sep 11, 2025 at 11:30:56AM -0700, Tim Chen wrote:
> > Allow architecture specific sched domain NUMA distances that can be
> > modified from NUMA node distances for the purpose of building NUMA
> > sched domains.
> > 
> > The actual NUMA distances are kept separately.  This allows for NUMA
> > domain levels modification when building sched domains for specific
> > architectures.
> > 
> > Consolidate the recording of unique NUMA distances in an array to
> > sched_record_numa_dist() so the function can be reused to record NUMA
> > distances when the NUMA distance metric is changed.
> > 
> > No functional change if there's no arch specific NUMA distances
> > are being defined.
> 
> Keeping both metrics side-by-side is confusing -- and not very well
> justified by the above.
> 
> Is there any appreciable benefit to mixing the two like this?

We can set sched_numa_node_distance to point to the same array as
sched_domains_numa_distance if no arch specific NUMA distances are
defined.  It would add some additional wrinkles to the recording and
deallocation logic to detect whether we have kept them the same.
I've opted to always keep two separate arrays so the code logic stays
simpler.
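
Roughly, the sharing variant would look like this (untested sketch;
the variable names are the locals from sched_init_numa() and
sched_reset_numa() in this patch):

	/*
	 * Untested sketch: in sched_init_numa(), share one array when
	 * the arch did not change the metric.
	 */
	if (nr_levels == nr_node_levels &&
	    !memcmp(distances, domain_distances, nr_levels * sizeof(int))) {
		kfree(domain_distances);
		domain_distances = distances;
	}

	/* ...and sched_reset_numa() must then take care to free only once: */
	if (dom_distances != distances)
		kfree(dom_distances);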

Let me know if you prefer otherwise.

> 
> > 
> > Co-developed-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
> > Signed-off-by: Vinicius Costa Gomes <vinicius.gomes@intel.com>
> > Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> > ---
> >  include/linux/sched/topology.h |   2 +
> >  kernel/sched/topology.c        | 114 ++++++++++++++++++++++++++++-----
> >  2 files changed, 99 insertions(+), 17 deletions(-)
> > 
> > diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
> > index 5263746b63e8..4f58e78ca52e 100644
> > --- a/include/linux/sched/topology.h
> > +++ b/include/linux/sched/topology.h
> > @@ -59,6 +59,8 @@ static inline int cpu_numa_flags(void)
> >  #endif
> >  
> >  extern int arch_asym_cpu_priority(int cpu);
> > +extern int arch_sched_node_distance(int from, int to);
> > +extern int sched_avg_remote_numa_distance;
> >  
> >  struct sched_domain_attr {
> >  	int relax_domain_level;
> > diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> > index 977e133bb8a4..6c0ff62322cb 100644
> > --- a/kernel/sched/topology.c
> > +++ b/kernel/sched/topology.c
> > @@ -1591,10 +1591,13 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
> >  enum numa_topology_type sched_numa_topology_type;
> >  
> >  static int			sched_domains_numa_levels;
> > +static int			sched_numa_node_levels;
> >  static int			sched_domains_curr_level;
> >  
> >  int				sched_max_numa_distance;
> > +int				sched_avg_remote_numa_distance;
> >  static int			*sched_domains_numa_distance;
> > +static int			*sched_numa_node_distance;
> >  static struct cpumask		***sched_domains_numa_masks;
> >  #endif /* CONFIG_NUMA */
> >  
> > @@ -1808,10 +1811,10 @@ bool find_numa_distance(int distance)
> >  		return true;
> >  
> >  	rcu_read_lock();
> > -	distances = rcu_dereference(sched_domains_numa_distance);
> > +	distances = rcu_dereference(sched_numa_node_distance);
> >  	if (!distances)
> >  		goto unlock;
> > -	for (i = 0; i < sched_domains_numa_levels; i++) {
> > +	for (i = 0; i < sched_numa_node_levels; i++) {
> >  		if (distances[i] == distance) {
> >  			found = true;
> >  			break;
> 
> I'm assuming (because its not actually stated anywhere) that
> sched_numa_$FOO is based on the SLIT table, while sched_domain_$FOO is
> the modified thing.
> 
> And you're saying it makes a significant difference to
> preferred_group_nid()?
> 
> > +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> > +		int **dist, int *levels)
> > +
> 
> That's a coding style fail; use cino=(0:0.
> 
> >  {
> > -	struct sched_domain_topology_level *tl;
> >  	unsigned long *distance_map;
> >  	int nr_levels = 0;
> >  	int i, j;
> >  	int *distances;
> > -	struct cpumask ***masks;
> >  
> >  	/*
> >  	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> > @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
> >  	 */
> >  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
> >  	if (!distance_map)
> > -		return;
> > +		return -ENOMEM;
> >  
> >  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
> >  	for_each_cpu_node_but(i, offline_node) {
> >  		for_each_cpu_node_but(j, offline_node) {
> > -			int distance = node_distance(i, j);
> > +			int distance = n_dist(i, j);
> >  
> >  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
> >  				sched_numa_warn("Invalid distance value range");
> >  				bitmap_free(distance_map);
> > -				return;
> > +				return -EINVAL;
> >  			}
> >  
> >  			bitmap_set(distance_map, distance, 1);
> > @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
> >  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
> >  	if (!distances) {
> >  		bitmap_free(distance_map);
> > -		return;
> > +		return -ENOMEM;
> >  	}
> > -
> >  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
> >  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
> >  		distances[i] = j;
> >  	}
> > -	rcu_assign_pointer(sched_domains_numa_distance, distances);
> > +	*dist = distances;
> > +	*levels = nr_levels;
> >  
> >  	bitmap_free(distance_map);
> >  
> > +	return 0;
> > +}
> > +
> > +static int avg_remote_numa_distance(int offline_node)
> > +{
> > +	int i, j;
> > +	int distance, nr_remote = 0, total_distance = 0;
> > +
> > +	for_each_cpu_node_but(i, offline_node) {
> > +		for_each_cpu_node_but(j, offline_node) {
> > +			distance = node_distance(i, j);
> > +
> > +			if (distance >= REMOTE_DISTANCE) {
> > +				nr_remote++;
> > +				total_distance += distance;
> > +			}
> > +		}
> > +	}
> > +	if (nr_remote)
> > +		return total_distance / nr_remote;
> > +	else
> > +		return REMOTE_DISTANCE;
> > +}
> > +
> > +void sched_init_numa(int offline_node)
> > +{
> > +	struct sched_domain_topology_level *tl;
> > +	int nr_levels, nr_node_levels;
> > +	int i, j;
> > +	int *distances, *domain_distances;
> > +	struct cpumask ***masks;
> > +
> > +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> > +				   &nr_node_levels))
> > +		return;
> > +
> > +	WRITE_ONCE(sched_avg_remote_numa_distance,
> > +		   avg_remote_numa_distance(offline_node));
> 
> What is the point of all this? sched_avg_remote_numa_distance isn't
> actually used anywhere. I'm thinking it doesn't want to be in this patch
> at the very least.

sched_avg_remote_numa_distance could actually change when we offline/online a
node.  I think arch_sched_node_distance(i, j) needs to be changed to
arch_sched_node_distance(i, j, offline_node) so that it knows not to include
offline_node in its average distance computation.  I will do that then.

Thanks for reviewing the code.

Tim
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Tim Chen 2 weeks, 2 days ago
On Mon, 2025-09-15 at 10:13 -0700, Tim Chen wrote:
> On Mon, 2025-09-15 at 14:37 +0200, Peter Zijlstra wrote:
> > On Thu, Sep 11, 2025 at 11:30:56AM -0700, Tim Chen wrote:

<snip>

> > > +
> > > +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> > > +				   &nr_node_levels))
> > > +		return;
> > > +
> > > +	WRITE_ONCE(sched_avg_remote_numa_distance,
> > > +		   avg_remote_numa_distance(offline_node));
> > 
> > What is the point of all this? sched_avg_remote_numa_distance isn't
> > actually used anywhere. I'm thinking it doesn't want to be in this patch
> > at the very least.
> 
> sched_avg_remote_numa_distance actually could change when we offline/online a
> node.  I think arch_sched_node_distance(i, j) needs to be changed to
> arch_sched_node_distance(i, j, offline_node) so it knows not to include
> offline_node in its avg distance computation.  I will do that then.
> 

On second thought, the GNR and CWF topologies only need an average
remote distance.  Whether the offline node enters the computation
does not change the resulting sched domain topology.  So passing the
offline node is not necessary; I will update the code accordingly
and move the average distance computation to an x86-specific file.
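
A rough sketch of what that x86-side helper might look like (the
function name is hypothetical; for_each_online_node(),
node_distance() and REMOTE_DISTANCE are existing helpers):

	static int x86_avg_remote_node_distance(void)
	{
		int i, j, nr_remote = 0, total = 0;

		for_each_online_node(i) {
			for_each_online_node(j) {
				int d = node_distance(i, j);

				/* Only average the remote pairs. */
				if (d >= REMOTE_DISTANCE) {
					nr_remote++;
					total += d;
				}
			}
		}

		return nr_remote ? total / nr_remote : REMOTE_DISTANCE;
	}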

Tim
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Chen, Yu C 2 weeks, 6 days ago
On 9/12/2025 2:30 AM, Tim Chen wrote:
> Allow architecture specific sched domain NUMA distances that can be
> modified from NUMA node distances for the purpose of building NUMA
> sched domains.
> 
> The actual NUMA distances are kept separately.  This allows for NUMA
> domain levels modification when building sched domains for specific
> architectures.
> 
> Consolidate the recording of unique NUMA distances in an array to
> sched_record_numa_dist() so the function can be reused to record NUMA
> distances when the NUMA distance metric is changed.
> 
> No functional change if there's no arch specific NUMA distances
> are being defined.
> 

[snip]

> +
> +void sched_init_numa(int offline_node)
> +{
> +	struct sched_domain_topology_level *tl;
> +	int nr_levels, nr_node_levels;
> +	int i, j;
> +	int *distances, *domain_distances;
> +	struct cpumask ***masks;
> +
> +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> +				   &nr_node_levels))
> +		return;
> +
> +	WRITE_ONCE(sched_avg_remote_numa_distance,
> +		   avg_remote_numa_distance(offline_node));
> +
> +	if (sched_record_numa_dist(offline_node,
> +				   arch_sched_node_distance, &domain_distances,
> +				   &nr_levels)) {
> +		kfree(distances);
> +		return;
> +	}
> +	rcu_assign_pointer(sched_numa_node_distance, distances);
> +	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);

[snip]

> @@ -2022,7 +2097,6 @@ void sched_init_numa(int offline_node)
>   	sched_domain_topology = tl;
>   
>   	sched_domains_numa_levels = nr_levels;
> -	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
>   

Before this patch, sched_max_numa_distance was assigned a valid
value at the end of sched_init_numa(), after sched_domains_numa_masks
and the sched_domain_topology_level entries had been successfully
created or appended, i.e. after the kzalloc() calls had succeeded.

Now we assign sched_max_numa_distance earlier, without considering
the status of the NUMA sched domains. I think this is intended:
sched domains are only for generic load balancing, while
sched_max_numa_distance is for NUMA load balancing; in theory they
use different metrics in their strategies. Thus, this change should
not cause any issues.

From my understanding,

Reviewed-by: Chen Yu <yu.c.chen@intel.com>

thanks,
Chenyu
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Tim Chen 2 weeks, 3 days ago
On Fri, 2025-09-12 at 13:24 +0800, Chen, Yu C wrote:
> On 9/12/2025 2:30 AM, Tim Chen wrote:
> > Allow architecture specific sched domain NUMA distances that can be
> > modified from NUMA node distances for the purpose of building NUMA
> > sched domains.
> > 
> > The actual NUMA distances are kept separately.  This allows for NUMA
> > domain levels modification when building sched domains for specific
> > architectures.
> > 
> > Consolidate the recording of unique NUMA distances in an array to
> > sched_record_numa_dist() so the function can be reused to record NUMA
> > distances when the NUMA distance metric is changed.
> > 
> > No functional change if there's no arch specific NUMA distances
> > are being defined.
> > 
> 
> [snip]
> 
> > +
> > +void sched_init_numa(int offline_node)
> > +{
> > +	struct sched_domain_topology_level *tl;
> > +	int nr_levels, nr_node_levels;
> > +	int i, j;
> > +	int *distances, *domain_distances;
> > +	struct cpumask ***masks;
> > +
> > +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> > +				   &nr_node_levels))
> > +		return;
> > +
> > +	WRITE_ONCE(sched_avg_remote_numa_distance,
> > +		   avg_remote_numa_distance(offline_node));
> > +
> > +	if (sched_record_numa_dist(offline_node,
> > +				   arch_sched_node_distance, &domain_distances,
> > +				   &nr_levels)) {
> > +		kfree(distances);
> > +		return;
> > +	}
> > +	rcu_assign_pointer(sched_numa_node_distance, distances);
> > +	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
> 
> [snip]
> 
> > @@ -2022,7 +2097,6 @@ void sched_init_numa(int offline_node)
> >   	sched_domain_topology = tl;
> >   
> >   	sched_domains_numa_levels = nr_levels;
> > -	WRITE_ONCE(sched_max_numa_distance, sched_domains_numa_distance[nr_levels - 1]);
> >   
> 
> Before this patch, sched_max_numa_distance is assigned a valid
> value at the end of sched_init_numa(), after sched_domains_numa_masks
>   and sched_domain_topology_level are successfully created or appended
> , the kzalloc() call should succeed.
> 
> Now we assign sched_max_numa_distance earlier, without considering
> the status of NUMA sched domains. I think this is intended, because
>   sched domains are only for generic load balancing, while
>   sched_max_numa_distance is for NUMA load balancing; in theory, they
> use different metrics in their strategies. Thus, this change should
> not cause any issues.

Yes, now sched_max_numa_distance is used in conjunction with
sched_numa_node_distance.  So putting them together is okay.

> 
>  From my understanding,
> 
> Reviewed-by: Chen Yu <yu.c.chen@intel.com>

Thanks.

Tim

> 
> thanks,
> Chenyu
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by K Prateek Nayak 2 weeks, 6 days ago
Hello Tim,

On 9/12/2025 12:00 AM, Tim Chen wrote:
> +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> +		int **dist, int *levels)
> +

nit. Is the blank line above intentional?

Also personally I prefer breaking the two lines above as:

static int
sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), int **dist, int *levels)
{
	...
}

>  {
> -	struct sched_domain_topology_level *tl;
>  	unsigned long *distance_map;

Since we are breaking this out and adding return values, can we also
clean up the bitmap_free() before every return with __free(bitmap), like:

(Only build tested)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6c0ff62322cb..baa79e79ced8 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1910,9 +1910,8 @@ static int numa_node_dist(int i, int j)
 
 static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
 		int **dist, int *levels)
-
 {
-	unsigned long *distance_map;
+	unsigned long *distance_map __free(bitmap) = NULL;
 	int nr_levels = 0;
 	int i, j;
 	int *distances;
@@ -1932,7 +1931,6 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
 
 			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
 				sched_numa_warn("Invalid distance value range");
-				bitmap_free(distance_map);
 				return -EINVAL;
 			}
 
@@ -1946,19 +1944,17 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
 	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
 
 	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
-	if (!distances) {
-		bitmap_free(distance_map);
+	if (!distances)
 		return -ENOMEM;
-	}
+
 	for (i = 0, j = 0; i < nr_levels; i++, j++) {
 		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
 		distances[i] = j;
 	}
+
 	*dist = distances;
 	*levels = nr_levels;
 
-	bitmap_free(distance_map);
-
 	return 0;
 }
 
---
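
For context, __free(bitmap) relies on the scope-based cleanup helpers
from include/linux/cleanup.h; bitmap.h wires it up roughly like this,
so the map is freed automatically on every return path:

	/* include/linux/bitmap.h registers the cleanup hook: */
	DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))

	/* usage: 'map' is freed automatically when it goes out of scope */
	unsigned long *map __free(bitmap) = bitmap_alloc(64, GFP_KERNEL);
	if (!map)
		return -ENOMEM;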

>  	int nr_levels = 0;
>  	int i, j;
>  	int *distances;
> -	struct cpumask ***masks;
>  
>  	/*
>  	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
>  	 */
>  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
>  	if (!distance_map)
> -		return;
> +		return -ENOMEM;
>  
>  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
>  	for_each_cpu_node_but(i, offline_node) {
>  		for_each_cpu_node_but(j, offline_node) {
> -			int distance = node_distance(i, j);
> +			int distance = n_dist(i, j);
>  
>  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
>  				sched_numa_warn("Invalid distance value range");
>  				bitmap_free(distance_map);
> -				return;
> +				return -EINVAL;
>  			}
>  
>  			bitmap_set(distance_map, distance, 1);
> @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
>  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
>  	if (!distances) {
>  		bitmap_free(distance_map);
> -		return;
> +		return -ENOMEM;
>  	}
> -
>  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
>  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
>  		distances[i] = j;
>  	}
> -	rcu_assign_pointer(sched_domains_numa_distance, distances);
> +	*dist = distances;
> +	*levels = nr_levels;
>  
>  	bitmap_free(distance_map);
>  
> +	return 0;
> +}
> +
> +static int avg_remote_numa_distance(int offline_node)
> +{
> +	int i, j;
> +	int distance, nr_remote = 0, total_distance = 0;
> +
> +	for_each_cpu_node_but(i, offline_node) {
> +		for_each_cpu_node_but(j, offline_node) {
> +			distance = node_distance(i, j);
> +
> +			if (distance >= REMOTE_DISTANCE) {
> +				nr_remote++;
> +				total_distance += distance;
> +			}
> +		}
> +	}
> +	if (nr_remote)
> +		return total_distance / nr_remote;
> +	else
> +		return REMOTE_DISTANCE;
> +}
> +
> +void sched_init_numa(int offline_node)
> +{
> +	struct sched_domain_topology_level *tl;
> +	int nr_levels, nr_node_levels;
> +	int i, j;
> +	int *distances, *domain_distances;
> +	struct cpumask ***masks;
> +
> +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> +				   &nr_node_levels))
> +		return;
> +
> +	WRITE_ONCE(sched_avg_remote_numa_distance,
> +		   avg_remote_numa_distance(offline_node));

nit.

Can add a small comment here saying arch_sched_node_distance() may
depend on sched_avg_remote_numa_distance and requires it to be
initialized correctly before computing domain_distances.
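
Perhaps something along the lines of:

	/*
	 * arch_sched_node_distance() may consult
	 * sched_avg_remote_numa_distance, so compute it before
	 * recording the domain distances below.
	 */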

Apart from those nitpicks, the changes look good to me. Please feel free
to include:

Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>

-- 
Thanks and Regards,
Prateek

> +
> +	if (sched_record_numa_dist(offline_node,
> +				   arch_sched_node_distance, &domain_distances,
> +				   &nr_levels)) {
> +		kfree(distances);
> +		return;
> +	}
> +	rcu_assign_pointer(sched_numa_node_distance, distances);
> +	WRITE_ONCE(sched_max_numa_distance, distances[nr_node_levels - 1]);
> +	WRITE_ONCE(sched_numa_node_levels, nr_node_levels);
> +
>  	/*
>  	 * 'nr_levels' contains the number of unique distances
>  	 *
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by Tim Chen 2 weeks, 3 days ago
On Fri, 2025-09-12 at 08:53 +0530, K Prateek Nayak wrote:
> Hello Tim,
> 
> On 9/12/2025 12:00 AM, Tim Chen wrote:
> > +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
> > +		int **dist, int *levels)
> > +
> 
> nit. Is the blank line above intentional?
> 
> Also personally I prefer breaking the two lines above as:
> 
> static int
> sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), int **dist, int *levels)

That would exceed 80 characters.  So we would still need to move some parameters to a different
line to keep within the limit.

> {
> 	...
> }
> 
> >  {
> > -	struct sched_domain_topology_level *tl;
> >  	unsigned long *distance_map;
> 
> Since we are breaking this out and adding return values, can we also
> cleanup that bitmap_free() before every return with __free(bitmap) like:
> 
> (Only build tested)

Yes, __free(bitmap) will be better here.

> 
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 6c0ff62322cb..baa79e79ced8 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1910,9 +1910,8 @@ static int numa_node_dist(int i, int j)
>  
>  static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
>  		int **dist, int *levels)
> -
>  {
> -	unsigned long *distance_map;
> +	unsigned long *distance_map __free(bitmap) = NULL;
>  	int nr_levels = 0;
>  	int i, j;
>  	int *distances;
> @@ -1932,7 +1931,6 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
>  
>  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
>  				sched_numa_warn("Invalid distance value range");
> -				bitmap_free(distance_map);
>  				return -EINVAL;
>  			}
>  
> @@ -1946,19 +1944,17 @@ static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
>  	nr_levels = bitmap_weight(distance_map, NR_DISTANCE_VALUES);
>  
>  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
> -	if (!distances) {
> -		bitmap_free(distance_map);
> +	if (!distances)
>  		return -ENOMEM;
> -	}
> +
>  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
>  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
>  		distances[i] = j;
>  	}
> +
>  	*dist = distances;
>  	*levels = nr_levels;
>  
> -	bitmap_free(distance_map);
> -
>  	return 0;
>  }
>  
> ---
> 
> >  	int nr_levels = 0;
> >  	int i, j;
> >  	int *distances;
> > -	struct cpumask ***masks;
> >  
> >  	/*
> >  	 * O(nr_nodes^2) de-duplicating selection sort -- in order to find the
> > @@ -1902,17 +1923,17 @@ void sched_init_numa(int offline_node)
> >  	 */
> >  	distance_map = bitmap_alloc(NR_DISTANCE_VALUES, GFP_KERNEL);
> >  	if (!distance_map)
> > -		return;
> > +		return -ENOMEM;
> >  
> >  	bitmap_zero(distance_map, NR_DISTANCE_VALUES);
> >  	for_each_cpu_node_but(i, offline_node) {
> >  		for_each_cpu_node_but(j, offline_node) {
> > -			int distance = node_distance(i, j);
> > +			int distance = n_dist(i, j);
> >  
> >  			if (distance < LOCAL_DISTANCE || distance >= NR_DISTANCE_VALUES) {
> >  				sched_numa_warn("Invalid distance value range");
> >  				bitmap_free(distance_map);
> > -				return;
> > +				return -EINVAL;
> >  			}
> >  
> >  			bitmap_set(distance_map, distance, 1);
> > @@ -1927,17 +1948,66 @@ void sched_init_numa(int offline_node)
> >  	distances = kcalloc(nr_levels, sizeof(int), GFP_KERNEL);
> >  	if (!distances) {
> >  		bitmap_free(distance_map);
> > -		return;
> > +		return -ENOMEM;
> >  	}
> > -
> >  	for (i = 0, j = 0; i < nr_levels; i++, j++) {
> >  		j = find_next_bit(distance_map, NR_DISTANCE_VALUES, j);
> >  		distances[i] = j;
> >  	}
> > -	rcu_assign_pointer(sched_domains_numa_distance, distances);
> > +	*dist = distances;
> > +	*levels = nr_levels;
> >  
> >  	bitmap_free(distance_map);
> >  
> > +	return 0;
> > +}
> > +
> > +static int avg_remote_numa_distance(int offline_node)
> > +{
> > +	int i, j;
> > +	int distance, nr_remote = 0, total_distance = 0;
> > +
> > +	for_each_cpu_node_but(i, offline_node) {
> > +		for_each_cpu_node_but(j, offline_node) {
> > +			distance = node_distance(i, j);
> > +
> > +			if (distance >= REMOTE_DISTANCE) {
> > +				nr_remote++;
> > +				total_distance += distance;
> > +			}
> > +		}
> > +	}
> > +	if (nr_remote)
> > +		return total_distance / nr_remote;
> > +	else
> > +		return REMOTE_DISTANCE;
> > +}
> > +
> > +void sched_init_numa(int offline_node)
> > +{
> > +	struct sched_domain_topology_level *tl;
> > +	int nr_levels, nr_node_levels;
> > +	int i, j;
> > +	int *distances, *domain_distances;
> > +	struct cpumask ***masks;
> > +
> > +	if (sched_record_numa_dist(offline_node, numa_node_dist, &distances,
> > +				   &nr_node_levels))
> > +		return;
> > +
> > +	WRITE_ONCE(sched_avg_remote_numa_distance,
> > +		   avg_remote_numa_distance(offline_node));
> 
> nit.
> 
> Can add a small comment here saying arch_sched_node_distance() may
> depend on sched_avg_remote_numa_distance and requires it to be
> initialized correctly before computing domain_distances.

Sure.

Thanks for the review.

Tim

> 
> Apart from those nitpicks, the changes look good to me. Please feel free
> to include:
> 
> Reviewed-by: K Prateek Nayak <kprateek.nayak@amd.com>
Re: [PATCH v3 1/2] sched: Create architecture specific sched domain distances
Posted by K Prateek Nayak 2 weeks, 1 day ago
Hello Tim,

On 9/15/2025 10:14 PM, Tim Chen wrote:
> On Fri, 2025-09-12 at 08:53 +0530, K Prateek Nayak wrote:
>> Hello Tim,
>>
>> On 9/12/2025 12:00 AM, Tim Chen wrote:
>>> +static int sched_record_numa_dist(int offline_node, int (*n_dist)(int, int),
>>> +		int **dist, int *levels)
>>> +
>> nit. Is the blank line above intentional?
>>
>> Also personally I prefer breaking the two lines above as:
>>
>> static int
>> sched_record_numa_dist(int offline_node, int (*n_dist)(int, int), int **dist, int *levels)
> That would exceed 80 characters.  So we would still need to move some parameters to a different
> line to keep within the limit.

Well build_balance_mask() in the same file follows this pattern and
exceeds 80 characters. Maybe it is alright as long as it is under 100
characters :)

-- 
Thanks and Regards,
Prateek