The sublist can be prepared in advance outside the lock, so that the
time spent inside the lock is reduced and the chance of lock contention
is lowered.
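
For illustration, a minimal userspace analogue of the pattern (pthread
mutex and hypothetical names, not the kernel code itself): the batch is
built with the lock dropped, and attaching it to the shared pool is a
single O(1) splice under the lock.

	#include <pthread.h>
	#include <stdlib.h>

	struct node {
		struct node *next;
	};

	static struct node *pool;	/* shared free list, protected by pool_lock */
	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

	#define BATCH_SIZE 16

	static int fill_pool(void)
	{
		struct node *head = NULL, *tail = NULL;
		int cnt;

		/* Build the whole batch outside the lock. */
		for (cnt = 0; cnt < BATCH_SIZE; cnt++) {
			struct node *new = calloc(1, sizeof(*new));

			if (!new)
				break;
			new->next = head;
			head = new;
			if (!tail)
				tail = new;	/* the first allocation is the tail */
		}
		if (!cnt)
			return -1;

		/* Only the O(1) splice runs under the lock, not cnt insertions. */
		pthread_mutex_lock(&pool_lock);
		tail->next = pool;
		pool = head;
		pthread_mutex_unlock(&pool_lock);

		return cnt;
	}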
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
lib/debugobjects.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fc8224f9f0eda8f..998724e9dee526b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -167,23 +167,25 @@ static void fill_pool(void)
 		return;
 
 	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
-		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		HLIST_HEAD(batch_list);
+		struct debug_obj *new, *last;
 		int cnt;
 
 		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
-			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
-			if (!new[cnt])
+			new = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new)
 				break;
+			hlist_add_head(&new->node, &batch_list);
+			if (cnt == 0)
+				last = new;
 		}
 		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		while (cnt) {
-			hlist_add_head(&new[--cnt]->node, &obj_pool);
-			debug_objects_allocated++;
-			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
-		}
+		hlist_splice_init(&batch_list, &last->node, &obj_pool);
+		debug_objects_allocated += cnt;
+		WRITE_ONCE(obj_pool_free, obj_pool_free + cnt);
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }
--
2.34.1
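
[A note on the helper: since hlist_add_head() prepends, the object
allocated when cnt == 0 ends up as the tail of batch_list, which is why
the patch remembers it in `last`. hlist_splice_init(), as used above,
then presumably moves the whole chain onto the front of obj_pool in O(1)
and reinitializes batch_list. A sketch of a helper with those assumed
semantics, inferred from the call site rather than taken from mainline:

	/*
	 * Sketch (assumed semantics): move all nodes of @from, whose
	 * last node is @last, to the front of @to and leave @from empty.
	 */
	static inline void hlist_splice_init(struct hlist_head *from,
					     struct hlist_node *last,
					     struct hlist_head *to)
	{
		if (!hlist_empty(from)) {
			last->next = to->first;
			if (to->first)
				to->first->pprev = &last->next;
			to->first = from->first;
			from->first->pprev = &to->first;
			INIT_HLIST_HEAD(from);
		}
	}
]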
On Mon, Sep 02 2024 at 22:05, Zhen Lei wrote:
> The sublist can be prepared in advance outside the lock, so that the
> time spent inside the lock is reduced and the chance of lock contention
> is lowered.
>
> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
> ---
> lib/debugobjects.c | 18 ++++++++++--------
> 1 file changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/lib/debugobjects.c b/lib/debugobjects.c
> index fc8224f9f0eda8f..998724e9dee526b 100644
> --- a/lib/debugobjects.c
> +++ b/lib/debugobjects.c
> @@ -167,23 +167,25 @@ static void fill_pool(void)
> return;
>
> while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
> - struct debug_obj *new[ODEBUG_BATCH_SIZE];
> + HLIST_HEAD(batch_list);
> + struct debug_obj *new, *last;
Variable ordering please.
> int cnt;
>
> for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
> - new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
> - if (!new[cnt])
> + new = kmem_cache_zalloc(obj_cache, gfp);
> + if (!new)
> break;
> + hlist_add_head(&new->node, &batch_list);
> + if (cnt == 0)
if (!cnt)
but it would be more self explaining if you have:
struct debug_obj *new, *last = NULL;
and then
if (!last)
> + last = new;
> }
Thanks,
tglx
On 2024/9/3 18:09, Thomas Gleixner wrote:
> On Mon, Sep 02 2024 at 22:05, Zhen Lei wrote:
>
>> The sublist can be prepared in advance outside the lock, so that the
>> time spent inside the lock is reduced and the chance of lock contention
>> is lowered.
>>
>> Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
>> ---
>> lib/debugobjects.c | 18 ++++++++++--------
>> 1 file changed, 10 insertions(+), 8 deletions(-)
>>
>> diff --git a/lib/debugobjects.c b/lib/debugobjects.c
>> index fc8224f9f0eda8f..998724e9dee526b 100644
>> --- a/lib/debugobjects.c
>> +++ b/lib/debugobjects.c
>> @@ -167,23 +167,25 @@ static void fill_pool(void)
>> return;
>>
>> while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
>> - struct debug_obj *new[ODEBUG_BATCH_SIZE];
>> + HLIST_HEAD(batch_list);
>> + struct debug_obj *new, *last;
>
> Variable ordering please.
OK
>
>> int cnt;
>>
>> for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
>> - new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
>> - if (!new[cnt])
>> + new = kmem_cache_zalloc(obj_cache, gfp);
>> + if (!new)
>> break;
>> + hlist_add_head(&new->node, &batch_list);
>> + if (cnt == 0)
>
> if (!cnt)
>
> but it would be more self explaining if you have:
>
> struct debug_obj *new, *last = NULL;
>
> and then
> if (!last)
OK, I will change it. I had considered that, but went the other way to
avoid the NULL initialization. Ha ha, wrong choice.
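
That is, something like this (untested sketch, with the variable
ordering still to be fixed as requested):

	HLIST_HEAD(batch_list);
	struct debug_obj *new, *last = NULL;
	int cnt;

	for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			break;
		hlist_add_head(&new->node, &batch_list);
		/* The first allocation becomes the tail of batch_list. */
		if (!last)
			last = new;
	}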
>
>> + last = new;
>> }
>
> Thanks,
>
> tglx
--
Regards,
Zhen Lei