[patch 16/25] debugobjects: Rework object freeing

Posted by Thomas Gleixner 1 month, 3 weeks ago
__free_object() is incomprehensibly complex. The same can be achieved by:

   1) Adding the object to the per CPU pool

   2) If that pool is full, move a batch of objects into the global pool,
      or, if the global pool is full, into the to-free pool

This also prepares for batch processing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 lib/debugobjects.c |   99 ++++++++++++-----------------------------------------
 1 file changed, 24 insertions(+), 75 deletions(-)

--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -199,6 +199,27 @@ static struct debug_obj *pcpu_alloc(void
 	}
 }
 
+static void pcpu_free(struct debug_obj *obj)
+{
+	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+	lockdep_assert_irqs_disabled();
+
+	hlist_add_head(&obj->node, &pcp->objects);
+	pcp->cnt++;
+
+	/* Pool full ? */
+	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
+		return;
+
+	/* Remove a batch from the per CPU pool */
+	guard(raw_spinlock)(&pool_lock);
+	/* Try to fit the batch into the pool_global first */
+	if (!pool_move_batch(&pool_global, pcp))
+		pool_move_batch(&pool_to_free, pcp);
+	obj_pool_used -= ODEBUG_BATCH_SIZE;
+}
+
 static void free_object_list(struct hlist_head *head)
 {
 	struct hlist_node *tmp;
@@ -375,83 +396,11 @@ static void free_obj_work(struct work_st
 
 static void __free_object(struct debug_obj *obj)
 {
-	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
-	struct obj_pool *percpu_pool;
-	int lookahead_count = 0;
-	bool work;
-
 	guard(irqsave)();
-
-	if (unlikely(!obj_cache)) {
+	if (likely(obj_cache))
+		pcpu_free(obj);
+	else
 		hlist_add_head(&obj->node, &pool_boot);
-		return;
-	}
-
-	/*
-	 * Try to free it into the percpu pool first.
-	 */
-	percpu_pool = this_cpu_ptr(&pool_pcpu);
-	if (percpu_pool->cnt < ODEBUG_POOL_PERCPU_SIZE) {
-		hlist_add_head(&obj->node, &percpu_pool->objects);
-		percpu_pool->cnt++;
-		return;
-	}
-
-	/*
-	 * As the percpu pool is full, look ahead and pull out a batch
-	 * of objects from the percpu pool and free them as well.
-	 */
-	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
-		objs[lookahead_count] = __alloc_object(&percpu_pool->objects);
-		if (!objs[lookahead_count])
-			break;
-		percpu_pool->cnt--;
-	}
-
-	raw_spin_lock(&pool_lock);
-	work = (pool_global.cnt > pool_global.max_cnt) && obj_cache &&
-	       (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX);
-	obj_pool_used--;
-
-	if (work) {
-		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
-		hlist_add_head(&obj->node, &pool_to_free.objects);
-		if (lookahead_count) {
-			WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + lookahead_count);
-			obj_pool_used -= lookahead_count;
-			while (lookahead_count) {
-				hlist_add_head(&objs[--lookahead_count]->node,
-					       &pool_to_free.objects);
-			}
-		}
-
-		if ((pool_global.cnt > pool_global.max_cnt) &&
-		    (pool_to_free.cnt < ODEBUG_FREE_WORK_MAX)) {
-			int i;
-
-			/*
-			 * Free one more batch of objects from obj_pool.
-			 */
-			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
-				obj = __alloc_object(&pool_global.objects);
-				hlist_add_head(&obj->node, &pool_to_free.objects);
-				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-				WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt + 1);
-			}
-		}
-	} else {
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
-		hlist_add_head(&obj->node, &pool_global.objects);
-		if (lookahead_count) {
-			WRITE_ONCE(pool_global.cnt, pool_global.cnt + lookahead_count);
-			obj_pool_used -= lookahead_count;
-			while (lookahead_count) {
-				hlist_add_head(&objs[--lookahead_count]->node,
-					       &pool_global.objects);
-			}
-		}
-	}
-	raw_spin_unlock(&pool_lock);
 }
 
 /*
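
pool_move_batch(), which the new pcpu_free() relies on, is introduced earlier
in this series and is not visible in the hunk above. A minimal sketch of its
assumed semantics, inferred from the call sites in pcpu_free() (the body,
including the capacity check, is an assumption, not the series' actual
implementation):

static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
{
	lockdep_assert_held(&pool_lock);

	/* Assumed: fail if the destination cannot absorb a full batch */
	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt)
		return false;

	/* Callers guarantee that src holds at least one full batch */
	for (int i = 0; i < ODEBUG_BATCH_SIZE; i++) {
		struct debug_obj *obj;

		obj = hlist_entry(src->objects.first, struct debug_obj, node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &dst->objects);
	}
	/* The real code presumably uses WRITE_ONCE() for the global counters */
	src->cnt -= ODEBUG_BATCH_SIZE;
	dst->cnt += ODEBUG_BATCH_SIZE;
	return true;
}

Under this assumption, pcpu_free() prefers the global pool and spills a batch
into pool_to_free only when the global pool is already at capacity.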
Re: [patch 16/25] debugobjects: Rework object freeing
Posted by Leizhen (ThunderTown) 1 month, 2 weeks ago

On 2024/10/8 0:50, Thomas Gleixner wrote:
> __free_object() is incomprehensibly complex. The same can be achieved by:
> 
>    1) Adding the object to the per CPU pool
> 
>    2) If that pool is full, move a batch of objects into the global pool,
>       or, if the global pool is full, into the to-free pool
> 
> This also prepares for batch processing.

It feels like cutting the Gordian knot with a sharp sword.

Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>

-- 
Regards,
  Zhen Lei
[tip: core/debugobjects] debugobjects: Rework object freeing
Posted by tip-bot2 for Thomas Gleixner 1 month, 1 week ago
The following commit has been merged into the core/debugobjects branch of tip:

Commit-ID:     a3b9e191f5fc11fa93176a4074a919d33d64c5fe
Gitweb:        https://git.kernel.org/tip/a3b9e191f5fc11fa93176a4074a919d33d64c5fe
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Mon, 07 Oct 2024 18:50:10 +02:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Tue, 15 Oct 2024 17:30:32 +02:00

debugobjects: Rework object freeing

__free_object() is incomprehensibly complex. The same can be achieved by:

   1) Adding the object to the per CPU pool

   2) If that pool is full, move a batch of objects into the global pool,
      or, if the global pool is full, into the to-free pool

This also prepares for batch processing.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Link: https://lore.kernel.org/all/20241007164913.955542307@linutronix.de
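
For completeness, a hypothetical sequence that exercises the reworked path
once this commit is applied (timer_setup_on_stack(), timer_delete_sync() and
destroy_timer_on_stack() are real kernel APIs that enter debugobjects when
CONFIG_DEBUG_OBJECTS_TIMERS is enabled; my_timeout_fn and its caller are
placeholders):

static void my_timeout_fn(struct timer_list *t) { }

static void exercise_free_path(void)
{
	struct timer_list t;

	timer_setup_on_stack(&t, my_timeout_fn, 0);
	mod_timer(&t, jiffies + HZ);
	timer_delete_sync(&t);
	/* debug_object_free() -> __free_object() -> pcpu_free() */
	destroy_timer_on_stack(&t);
}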
