From: Zhen Lei <thunder.leizhen@huawei.com>

Collect the newly allocated debug objects in a local list outside the
lock, so that the lock hold time and the potential lock contention are
reduced. The whole batch is then spliced into the object pool in one go
while the lock is held.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20240911083521.2257-3-thunder.leizhen@huawei.com
---
 lib/debugobjects.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -161,23 +161,25 @@ static void fill_pool(void)
 		return;
 
 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
-		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		struct debug_obj *new, *last = NULL;
+		HLIST_HEAD(head);
 		int cnt;
 
 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
-			if (!new[cnt])
+			new = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new)
 				break;
+			hlist_add_head(&new->node, &head);
+			if (!last)
+				last = new;
 		}
 		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		while (cnt) {
-			hlist_add_head(&new[--cnt]->node, &obj_pool);
-			debug_objects_allocated++;
-			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
-		}
+		hlist_splice_init(&head, &last->node, &obj_pool);
+		debug_objects_allocated += cnt;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }
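
For readers unfamiliar with the splice pattern, here is a minimal
userspace sketch of what the patch does: build the batch on a private
list outside the lock while remembering the tail node, then graft the
whole chain onto the shared pool with O(1) pointer updates under the
lock. This is an analogue only: it uses a hand-rolled singly linked
list and a pthread mutex instead of the kernel's hlist and
raw_spin_lock_irqsave(), and all names in it (struct obj,
fill_pool_batched, BATCH_SIZE) are made up for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_SIZE 4

struct obj {
	int id;
	struct obj *next;
};

static struct obj *pool;	/* shared pool, protected by pool_lock */
static int pool_free;		/* number of free objects in the pool */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static void fill_pool_batched(void)
{
	struct obj *head = NULL, *last = NULL;
	int cnt;

	/* Allocate the batch outside the lock, chained on a private list. */
	for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
		struct obj *new = calloc(1, sizeof(*new));

		if (!new)
			break;
		new->id = cnt;
		new->next = head;
		head = new;
		if (!last)
			last = new;	/* first node added becomes the tail */
	}
	if (!cnt)
		return;

	/*
	 * Splice the whole chain into the pool: two pointer updates under
	 * the lock, no matter how large the batch is.
	 */
	pthread_mutex_lock(&pool_lock);
	last->next = pool;
	pool = head;
	pool_free += cnt;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	fill_pool_batched();

	for (struct obj *o = pool; o; o = o->next)
		printf("obj %d\n", o->id);
	printf("pool_free = %d\n", pool_free);
	return 0;
}

Remembering the tail during the unlocked build phase is what makes the
locked section constant-time; hlist_splice_init(&head, &last->node,
&obj_pool) in the patch relies on the same tail pointer for the same
reason.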