[PATCH] sched/topology: Fix memory leak in the error path of sched_init_numa

Luo Gengkun posted 1 patch 2 months, 1 week ago
There is a newer version of this series
kernel/sched/topology.c | 33 +++++++++++++++++++++------------
1 file changed, 21 insertions(+), 12 deletions(-)
[PATCH] sched/topology: Fix memory leak in the error path of sched_init_numa
Posted by Luo Gengkun 2 months, 1 week ago
In sched_init_numa, the masks array holds allocated memory, but the error
path returns directly without freeing that memory.
To fix this, the freeing logic in sched_reset_numa can be extracted into a
new function, free_masks, which can be called on the error path.

Fixes: 0fb3978b0aac ("sched/numa: Fix NUMA topology for systems with CPU-less nodes")
Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
---
 kernel/sched/topology.c | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 444bdfdab731..fd03bb6669f5 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1924,6 +1924,20 @@ static void init_numa_topology_type(int offline_node)
 
 #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
 
+static void free_masks(struct cpumask ***masks, int nr_levels)
+{
+	int i, j;
+
+	for (i = 0; i < nr_levels && masks; i++) {
+		if (!masks[i])
+			continue;
+		for_each_node(j)
+			kfree(masks[i][j]);
+		kfree(masks[i]);
+	}
+	kfree(masks);
+}
+
 void sched_init_numa(int offline_node)
 {
 	struct sched_domain_topology_level *tl;
@@ -2003,15 +2017,19 @@ void sched_init_numa(int offline_node)
 	 */
 	for (i = 0; i < nr_levels; i++) {
 		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
-		if (!masks[i])
+		if (!masks[i]) {
+			free_masks(masks, nr_levels);
 			return;
+		}
 
 		for_each_cpu_node_but(j, offline_node) {
 			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
 			int k;
 
-			if (!mask)
+			if (!mask) {
+				free_masks(masks, nr_levels);
 				return;
+			}
 
 			masks[i][j] = mask;
 
@@ -2079,18 +2097,9 @@ static void sched_reset_numa(void)
 	masks = sched_domains_numa_masks;
 	rcu_assign_pointer(sched_domains_numa_masks, NULL);
 	if (distances || masks) {
-		int i, j;
-
 		synchronize_rcu();
 		kfree(distances);
-		for (i = 0; i < nr_levels && masks; i++) {
-			if (!masks[i])
-				continue;
-			for_each_node(j)
-				kfree(masks[i][j]);
-			kfree(masks[i]);
-		}
-		kfree(masks);
+		free_masks(masks, nr_levels);
 	}
 	if (sched_domain_topology_saved) {
 		kfree(sched_domain_topology);
-- 
2.34.1
Re: [PATCH] sched/topology: Fix memory leak in the error path of sched_init_numa
Posted by Huang, Ying 2 months, 1 week ago
Luo Gengkun <luogengkun@huaweicloud.com> writes:

> In sched_init_numa, the masks array holds allocated memory, but the error
> path returns directly without freeing that memory.
> To fix this, the freeing logic in sched_reset_numa can be extracted into a
> new function, free_masks, which can be called on the error path.

Good catch!  Thanks!

> Fixes: 0fb3978b0aac ("sched/numa: Fix NUMA topology for systems with CPU-less nodes")
> Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
> ---
>  kernel/sched/topology.c | 33 +++++++++++++++++++++------------
>  1 file changed, 21 insertions(+), 12 deletions(-)
>
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 444bdfdab731..fd03bb6669f5 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1924,6 +1924,20 @@ static void init_numa_topology_type(int offline_node)
>  
>  #define NR_DISTANCE_VALUES (1 << DISTANCE_BITS)
>  
> +static void free_masks(struct cpumask ***masks, int nr_levels)

The function name appears too general.  How about __sched_free_masks()?

> +{
> +	int i, j;
> +
> +	for (i = 0; i < nr_levels && masks; i++) {
> +		if (!masks[i])
> +			continue;
> +		for_each_node(j)
> +			kfree(masks[i][j]);
> +		kfree(masks[i]);
> +	}
> +	kfree(masks);
> +}
> +
>  void sched_init_numa(int offline_node)
>  {
>  	struct sched_domain_topology_level *tl;
> @@ -2003,15 +2017,19 @@ void sched_init_numa(int offline_node)
>  	 */
>  	for (i = 0; i < nr_levels; i++) {
>  		masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
> -		if (!masks[i])
> +		if (!masks[i]) {
> +			free_masks(masks, nr_levels);
>  			return;
> +		}
>  
>  		for_each_cpu_node_but(j, offline_node) {
>  			struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
>  			int k;
>  
> -			if (!mask)
> +			if (!mask) {
> +				free_masks(masks, nr_levels);
>  				return;
> +			}
>  
>  			masks[i][j] = mask;
>  
> @@ -2079,18 +2097,9 @@ static void sched_reset_numa(void)
>  	masks = sched_domains_numa_masks;
>  	rcu_assign_pointer(sched_domains_numa_masks, NULL);
>  	if (distances || masks) {
> -		int i, j;
> -
>  		synchronize_rcu();
>  		kfree(distances);
> -		for (i = 0; i < nr_levels && masks; i++) {
> -			if (!masks[i])
> -				continue;
> -			for_each_node(j)
> -				kfree(masks[i][j]);
> -			kfree(masks[i]);
> -		}
> -		kfree(masks);
> +		free_masks(masks, nr_levels);
>  	}
>  	if (sched_domain_topology_saved) {
>  		kfree(sched_domain_topology);

Otherwise, the patch LGTM.  Feel free to add my

Reviewed-by: Huang Ying <ying.huang@linux.alibaba.com>

in the future versions.

---
Best Regards,
Huang, Ying