[patch 15/25] debugobjects: Rework object allocation
Posted by Thomas Gleixner 1 month, 3 weeks ago
The current allocation scheme tries to allocate from the per-CPU pool
first. If that fails, it allocates one object from the global pool and then
refills the per-CPU pool from the global pool.

That stands in the way of switching the pool management to batch mode, as
the global pool needs to be a strict stack of batches, which does not allow
allocating single objects.

Rework the code to refill the per-CPU pool first and then allocate the
object from the refilled batch. Also try to allocate from the to-free pool
first to avoid freeing and reallocating objects.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 lib/debugobjects.c |  144 +++++++++++++++++++++++++----------------------------
 1 file changed, 69 insertions(+), 75 deletions(-)

--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -141,6 +141,64 @@ static __always_inline bool pool_must_refill(struct obj_pool *pool)
 	return pool_count(pool) < pool->min_cnt / 2;
 }
 
+static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
+{
+	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
+		return false;
+
+	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
+		struct hlist_node *node = src->objects.first;
+
+		WRITE_ONCE(src->cnt, src->cnt - 1);
+		WRITE_ONCE(dst->cnt, dst->cnt + 1);
+
+		hlist_del(node);
+		hlist_add_head(node, &dst->objects);
+	}
+	return true;
+}
+
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+	struct debug_obj *obj;
+
+	if (unlikely(!list->first))
+		return NULL;
+
+	obj = hlist_entry(list->first, typeof(*obj), node);
+	hlist_del(&obj->node);
+	return obj;
+}
+
+static struct debug_obj *pcpu_alloc(void)
+{
+	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+	lockdep_assert_irqs_disabled();
+
+	for (;;) {
+		struct debug_obj *obj = __alloc_object(&pcp->objects);
+
+		if (likely(obj)) {
+			pcp->cnt--;
+			return obj;
+		}
+
+		guard(raw_spinlock)(&pool_lock);
+		if (!pool_move_batch(pcp, &pool_to_free)) {
+			if (!pool_move_batch(pcp, &pool_global))
+				return NULL;
+		}
+		obj_pool_used += pcp->cnt;
+
+		if (obj_pool_used > obj_pool_max_used)
+			obj_pool_max_used = obj_pool_used;
+
+		if (pool_global.cnt < obj_pool_min_free)
+			obj_pool_min_free = pool_global.cnt;
+	}
+}
+
 static void free_object_list(struct hlist_head *head)
 {
 	struct hlist_node *tmp;
@@ -158,7 +216,6 @@ static void free_object_list(struct hlist_head *head)
 static void fill_pool_from_freelist(void)
 {
 	static unsigned long state;
-	struct debug_obj *obj;
 
 	/*
 	 * Reuse objs from the global obj_to_free list; they will be
@@ -180,17 +237,11 @@ static void fill_pool_from_freelist(void)
 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
 		return;
 
-	guard(raw_spinlock)(&pool_lock);
-	/*
-	 * Recheck with the lock held as the worker thread might have
-	 * won the race and freed the global free list already.
-	 */
-	while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
-		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
-		hlist_del(&obj->node);
-		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
-		hlist_add_head(&obj->node, &pool_global.objects);
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
+	/* Avoid taking the lock when there is no work to do */
+	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
+		guard(raw_spinlock)(&pool_lock);
+		/* Move a batch if possible */
+		pool_move_batch(&pool_global, &pool_to_free);
 	}
 	clear_bit(0, &state);
 }
@@ -251,74 +302,17 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 	return NULL;
 }
 
-/*
- * Allocate a new object from the hlist
- */
-static struct debug_obj *__alloc_object(struct hlist_head *list)
+static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
+				      const struct debug_obj_descr *descr)
 {
-	struct debug_obj *obj = NULL;
-
-	if (list->first) {
-		obj = hlist_entry(list->first, typeof(*obj), node);
-		hlist_del(&obj->node);
-	}
-
-	return obj;
-}
-
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
-{
-	struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
 	struct debug_obj *obj;
 
-	if (likely(obj_cache)) {
-		obj = __alloc_object(&percpu_pool->objects);
-		if (obj) {
-			percpu_pool->cnt--;
-			goto init_obj;
-		}
-	} else {
+	if (likely(obj_cache))
+		obj = pcpu_alloc();
+	else
 		obj = __alloc_object(&pool_boot);
-		goto init_obj;
-	}
-
-	raw_spin_lock(&pool_lock);
-	obj = __alloc_object(&pool_global.objects);
-	if (obj) {
-		obj_pool_used++;
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-
-		/*
-		 * Looking ahead, allocate one batch of debug objects and
-		 * put them into the percpu free pool.
-		 */
-		if (likely(obj_cache)) {
-			int i;
-
-			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
-				struct debug_obj *obj2;
-
-				obj2 = __alloc_object(&pool_global.objects);
-				if (!obj2)
-					break;
-				hlist_add_head(&obj2->node, &percpu_pool->objects);
-				percpu_pool->cnt++;
-				obj_pool_used++;
-				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-			}
-		}
-
-		if (obj_pool_used > obj_pool_max_used)
-			obj_pool_max_used = obj_pool_used;
-
-		if (pool_global.cnt < obj_pool_min_free)
-			obj_pool_min_free = pool_global.cnt;
-	}
-	raw_spin_unlock(&pool_lock);
 
-init_obj:
-	if (obj) {
+	if (likely(obj)) {
 		obj->object = addr;
 		obj->descr  = descr;
 		obj->state  = ODEBUG_STATE_NONE;
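
As a reading aid for the diff above, a minimal standalone C model of the
reworked allocation order might look as follows: refill the per-CPU pool by
a batch first, preferring the to-free pool over the global pool, then hand
out the object from the refilled pool. The integer-only pools, BATCH_SIZE
and the absence of locking are simplified stand-ins for illustration only;
they are not the kernel's types or the patch's actual code.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_SIZE 4

struct pool { int cnt; int max_cnt; };

/*
 * Move up to one batch from src to dst. Refuse when dst cannot take a full
 * batch or src is empty, mirroring the count handling of pool_move_batch().
 */
static bool pool_move_batch(struct pool *dst, struct pool *src)
{
	int n;

	if (dst->cnt + BATCH_SIZE > dst->max_cnt || !src->cnt)
		return false;

	n = src->cnt < BATCH_SIZE ? src->cnt : BATCH_SIZE;
	src->cnt -= n;
	dst->cnt += n;
	return true;
}

/* New scheme: refill the per-CPU pool first, then allocate from it. */
static bool pcpu_alloc(struct pool *pcp, struct pool *to_free, struct pool *global)
{
	for (;;) {
		if (pcp->cnt) {			/* fast path: local object available */
			pcp->cnt--;
			return true;
		}
		/* Prefer recycling objects queued for freeing over the global pool. */
		if (!pool_move_batch(pcp, to_free) && !pool_move_batch(pcp, global))
			return false;		/* both pools empty: allocation fails in this model */
	}
}

int main(void)
{
	struct pool pcp = { 0, 2 * BATCH_SIZE };
	struct pool to_free = { BATCH_SIZE, 64 };
	struct pool global = { 8 * BATCH_SIZE, 64 };

	for (int i = 0; i < 6; i++) {
		bool ok = pcpu_alloc(&pcp, &to_free, &global);

		printf("alloc %d: %s (pcp=%d to_free=%d global=%d)\n",
		       i, ok ? "ok" : "fail", pcp.cnt, to_free.cnt, global.cnt);
	}
	return 0;
}
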
Re: [patch 15/25] debugobjects: Rework object allocation
Posted by Leizhen (ThunderTown) 1 month, 2 weeks ago

On 2024/10/8 0:50, Thomas Gleixner wrote:
> The current allocation scheme tries to allocate from the per-CPU pool
> first. If that fails, it allocates one object from the global pool and then
> refills the per-CPU pool from the global pool.
> 
> That stands in the way of switching the pool management to batch mode, as
> the global pool needs to be a strict stack of batches, which does not allow
> allocating single objects.
> 
> Rework the code to refill the per-CPU pool first and then allocate the
> object from the refilled batch. Also try to allocate from the to-free pool
> first to avoid freeing and reallocating objects.

Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>

-- 
Regards,
  Zhen Lei
[tip: core/debugobjects] debugobjects: Rework object allocation
Posted by tip-bot2 for Thomas Gleixner 1 month, 1 week ago
The following commit has been merged into the core/debugobjects branch of tip:

Commit-ID:     fb60c004f33e0fa2e87b9456b87f1b2709436b88
Gitweb:        https://git.kernel.org/tip/fb60c004f33e0fa2e87b9456b87f1b2709436b88
Author:        Thomas Gleixner <tglx@linutronix.de>
AuthorDate:    Mon, 07 Oct 2024 18:50:09 +02:00
Committer:     Thomas Gleixner <tglx@linutronix.de>
CommitterDate: Tue, 15 Oct 2024 17:30:32 +02:00

debugobjects: Rework object allocation

The current allocation scheme tries to allocate from the per-CPU pool
first. If that fails, it allocates one object from the global pool and then
refills the per-CPU pool from the global pool.

That stands in the way of switching the pool management to batch mode, as
the global pool needs to be a strict stack of batches, which does not allow
allocating single objects.

Rework the code to refill the per-CPU pool first and then allocate the
object from the refilled batch. Also try to allocate from the to-free pool
first to avoid freeing and reallocating objects.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Zhen Lei <thunder.leizhen@huawei.com>
Link: https://lore.kernel.org/all/20241007164913.893554162@linutronix.de

---
 lib/debugobjects.c | 144 +++++++++++++++++++++-----------------------
 1 file changed, 69 insertions(+), 75 deletions(-)

diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fbe8f26..3b18d5d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -141,6 +141,64 @@ static __always_inline bool pool_must_refill(struct obj_pool *pool)
 	return pool_count(pool) < pool->min_cnt / 2;
 }
 
+static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
+{
+	if (dst->cnt + ODEBUG_BATCH_SIZE > dst->max_cnt || !src->cnt)
+		return false;
+
+	for (int i = 0; i < ODEBUG_BATCH_SIZE && src->cnt; i++) {
+		struct hlist_node *node = src->objects.first;
+
+		WRITE_ONCE(src->cnt, src->cnt - 1);
+		WRITE_ONCE(dst->cnt, dst->cnt + 1);
+
+		hlist_del(node);
+		hlist_add_head(node, &dst->objects);
+	}
+	return true;
+}
+
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+	struct debug_obj *obj;
+
+	if (unlikely(!list->first))
+		return NULL;
+
+	obj = hlist_entry(list->first, typeof(*obj), node);
+	hlist_del(&obj->node);
+	return obj;
+}
+
+static struct debug_obj *pcpu_alloc(void)
+{
+	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
+
+	lockdep_assert_irqs_disabled();
+
+	for (;;) {
+		struct debug_obj *obj = __alloc_object(&pcp->objects);
+
+		if (likely(obj)) {
+			pcp->cnt--;
+			return obj;
+		}
+
+		guard(raw_spinlock)(&pool_lock);
+		if (!pool_move_batch(pcp, &pool_to_free)) {
+			if (!pool_move_batch(pcp, &pool_global))
+				return NULL;
+		}
+		obj_pool_used += pcp->cnt;
+
+		if (obj_pool_used > obj_pool_max_used)
+			obj_pool_max_used = obj_pool_used;
+
+		if (pool_global.cnt < obj_pool_min_free)
+			obj_pool_min_free = pool_global.cnt;
+	}
+}
+
 static void free_object_list(struct hlist_head *head)
 {
 	struct hlist_node *tmp;
@@ -158,7 +216,6 @@ static void free_object_list(struct hlist_head *head)
 static void fill_pool_from_freelist(void)
 {
 	static unsigned long state;
-	struct debug_obj *obj;
 
 	/*
 	 * Reuse objs from the global obj_to_free list; they will be
@@ -180,17 +237,11 @@ static void fill_pool_from_freelist(void)
 	if (test_bit(0, &state) || test_and_set_bit(0, &state))
 		return;
 
-	guard(raw_spinlock)(&pool_lock);
-	/*
-	 * Recheck with the lock held as the worker thread might have
-	 * won the race and freed the global free list already.
-	 */
-	while (pool_to_free.cnt && (pool_global.cnt < pool_global.min_cnt)) {
-		obj = hlist_entry(pool_to_free.objects.first, typeof(*obj), node);
-		hlist_del(&obj->node);
-		WRITE_ONCE(pool_to_free.cnt, pool_to_free.cnt - 1);
-		hlist_add_head(&obj->node, &pool_global.objects);
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt + 1);
+	/* Avoid taking the lock when there is no work to do */
+	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
+		guard(raw_spinlock)(&pool_lock);
+		/* Move a batch if possible */
+		pool_move_batch(&pool_global, &pool_to_free);
 	}
 	clear_bit(0, &state);
 }
@@ -251,74 +302,17 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 	return NULL;
 }
 
-/*
- * Allocate a new object from the hlist
- */
-static struct debug_obj *__alloc_object(struct hlist_head *list)
-{
-	struct debug_obj *obj = NULL;
-
-	if (list->first) {
-		obj = hlist_entry(list->first, typeof(*obj), node);
-		hlist_del(&obj->node);
-	}
-
-	return obj;
-}
-
-static struct debug_obj *
-alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
+				      const struct debug_obj_descr *descr)
 {
-	struct obj_pool *percpu_pool = this_cpu_ptr(&pool_pcpu);
 	struct debug_obj *obj;
 
-	if (likely(obj_cache)) {
-		obj = __alloc_object(&percpu_pool->objects);
-		if (obj) {
-			percpu_pool->cnt--;
-			goto init_obj;
-		}
-	} else {
+	if (likely(obj_cache))
+		obj = pcpu_alloc();
+	else
 		obj = __alloc_object(&pool_boot);
-		goto init_obj;
-	}
-
-	raw_spin_lock(&pool_lock);
-	obj = __alloc_object(&pool_global.objects);
-	if (obj) {
-		obj_pool_used++;
-		WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
 
-		/*
-		 * Looking ahead, allocate one batch of debug objects and
-		 * put them into the percpu free pool.
-		 */
-		if (likely(obj_cache)) {
-			int i;
-
-			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
-				struct debug_obj *obj2;
-
-				obj2 = __alloc_object(&pool_global.objects);
-				if (!obj2)
-					break;
-				hlist_add_head(&obj2->node, &percpu_pool->objects);
-				percpu_pool->cnt++;
-				obj_pool_used++;
-				WRITE_ONCE(pool_global.cnt, pool_global.cnt - 1);
-			}
-		}
-
-		if (obj_pool_used > obj_pool_max_used)
-			obj_pool_max_used = obj_pool_used;
-
-		if (pool_global.cnt < obj_pool_min_free)
-			obj_pool_min_free = pool_global.cnt;
-	}
-	raw_spin_unlock(&pool_lock);
-
-init_obj:
-	if (obj) {
+	if (likely(obj)) {
 		obj->object = addr;
 		obj->descr  = descr;
 		obj->state  = ODEBUG_STATE_NONE;
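
The reworked fill_pool_from_freelist() and pcpu_alloc() both take pool_lock
with guard(raw_spinlock)(&pool_lock) inside a loop body: the lock is
acquired where the guard is declared and dropped automatically when that
scope ends, so each iteration holds the lock for at most one batch move
instead of across the whole refill. The kernel's guard() is built on the
compiler's scope-based cleanup attribute; the sketch below is a hedged
userspace illustration of that mechanism only, with pthread_mutex_t and the
scoped_lock() macro standing in for the raw spinlock and the real guard()
implementation. It is not kernel code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup handler: called with a pointer to the guard variable. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
	printf("  lock dropped at end of iteration scope\n");
}

/*
 * Poor man's guard(): lock now, unlock automatically when the enclosing
 * scope exits (same __attribute__((cleanup)) mechanism as linux/cleanup.h).
 */
#define scoped_lock(m) \
	pthread_mutex_t *scoped_lock_guard \
		__attribute__((cleanup(unlock_cleanup), unused)) = \
		(pthread_mutex_lock(m), (m))

int main(void)
{
	for (int batch = 0; batch < 3; batch++) {
		scoped_lock(&pool_lock);
		printf("moving batch %d under the lock\n", batch);
	}	/* unlock_cleanup() runs here, once per iteration */
	return 0;
}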