On 2024/10/8 0:50, Thomas Gleixner wrote:
> Move the loop to free a list of objects into a helper function so it can be
> reused later.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
> lib/debugobjects.c | 22 ++++++++++++++++------
> 1 file changed, 16 insertions(+), 6 deletions(-)
>
> --- a/lib/debugobjects.c
> +++ b/lib/debugobjects.c
> @@ -125,6 +125,20 @@ static const char *obj_states[ODEBUG_STA
>  	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
>  };
>
> +static void free_object_list(struct hlist_head *head)
> +{
> +	struct hlist_node *tmp;
> +	struct debug_obj *obj;
> +	int cnt = 0;
> +
> +	hlist_for_each_entry_safe(obj, tmp, head, node) {
> +		hlist_del(&obj->node);
> +		kmem_cache_free(obj_cache, obj);
> +		cnt++;
> +	}
> +	debug_objects_freed += cnt;
debug_objects_freed was previously updated while holding pool_lock, but free_object_list() is called after the lock has been dropped in free_obj_work(), so this increment is no longer serialized. Should the counter be converted to an atomic, or should the accounting stay under the lock? A rough sketch of the atomic variant follows.
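Untested sketch, assuming the counter is changed to atomic_long_t
(that type change and the atomic_long_add()/atomic_long_read() calls
are my suggestion, not part of this patch):

	static atomic_long_t debug_objects_freed;

	static void free_object_list(struct hlist_head *head)
	{
		struct hlist_node *tmp;
		struct debug_obj *obj;
		long cnt = 0;

		hlist_for_each_entry_safe(obj, tmp, head, node) {
			hlist_del(&obj->node);
			kmem_cache_free(obj_cache, obj);
			cnt++;
		}
		/* Lockless accounting, safe to do outside pool_lock */
		atomic_long_add(cnt, &debug_objects_freed);
	}

The reader side in debug_stats_show() would then need to switch to
atomic_long_read() as well. The alternative is to keep the plain
counter and let the callers do the "debug_objects_freed += cnt"
accounting while they still hold pool_lock.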
> +}
> +
>  static void fill_pool(void)
>  {
>  	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
> @@ -286,7 +300,6 @@ alloc_object(void *addr, struct debug_bu
>   */
>  static void free_obj_work(struct work_struct *work)
>  {
> -	struct hlist_node *tmp;
>  	struct debug_obj *obj;
>  	unsigned long flags;
>  	HLIST_HEAD(tofree);
> @@ -323,15 +336,11 @@ static void free_obj_work(struct work_st
>  	 */
>  	if (obj_nr_tofree) {
>  		hlist_move_list(&obj_to_free, &tofree);
> -		debug_objects_freed += obj_nr_tofree;
>  		WRITE_ONCE(obj_nr_tofree, 0);
>  	}
>  	raw_spin_unlock_irqrestore(&pool_lock, flags);
>
> -	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
> -		hlist_del(&obj->node);
> -		kmem_cache_free(obj_cache, obj);
> -	}
> +	free_object_list(&tofree);
>  }
>
> static void __free_object(struct debug_obj *obj)
> @@ -1334,6 +1343,7 @@ static bool __init debug_objects_replace
>  	}
>  	return true;
>  free:
> +	/* Can't use free_object_list() as the cache is not populated yet */
>  	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
>  		hlist_del(&obj->node);
>  		kmem_cache_free(cache, obj);
>
--
Regards,
Zhen Lei