From: chengming.zhou@linux.dev
To: vbabka@suse.cz, cl@linux.com, penberg@kernel.org
Cc: rientjes@google.com, iamjoonsoo.kim@lge.com, akpm@linux-foundation.org,
    roman.gushchin@linux.dev, 42.hyeyoo@gmail.com, linux-mm@kvack.org,
    linux-kernel@vger.kernel.org, chengming.zhou@linux.dev, Chengming Zhou
Subject: [PATCH v5 2/9] slub: Change get_partial() interfaces to return slab
Date: Thu, 2 Nov 2023 03:23:23 +0000
Message-Id: <20231102032330.1036151-3-chengming.zhou@linux.dev>
In-Reply-To: <20231102032330.1036151-1-chengming.zhou@linux.dev>
References: <20231102032330.1036151-1-chengming.zhou@linux.dev>

From: Chengming Zhou

We need all get_partial() related interfaces to return a slab instead of
returning the freelist (or object). Use partial_context.object to pass
the freelist or object back for now. This patch shouldn't have any
functional changes.

Suggested-by: Vlastimil Babka
Signed-off-by: Chengming Zhou
Reviewed-by: Vlastimil Babka
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 63 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 33 insertions(+), 30 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 0b0fdc8c189f..03384cd965c5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -204,9 +204,9 @@ DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 
 /* Structure holding parameters for get_partial() call chain */
 struct partial_context {
-	struct slab **slab;
 	gfp_t flags;
 	unsigned int orig_size;
+	void *object;
 };
 
 static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -2269,10 +2269,11 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct partial_context *pc)
+static struct slab *get_partial_node(struct kmem_cache *s,
+				     struct kmem_cache_node *n,
+				     struct partial_context *pc)
 {
-	struct slab *slab, *slab2;
+	struct slab *slab, *slab2, *partial = NULL;
 	void *object = NULL;
 	unsigned long flags;
 	unsigned int partial_slabs = 0;
@@ -2288,27 +2289,28 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
-		void *t;
-
 		if (!pfmemalloc_match(slab, pc->flags))
 			continue;
 
 		if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
 			object = alloc_single_from_partial(s, n, slab,
 							pc->orig_size);
-			if (object)
+			if (object) {
+				partial = slab;
+				pc->object = object;
 				break;
+			}
 			continue;
 		}
 
-		t = acquire_slab(s, n, slab, object == NULL);
-		if (!t)
+		object = acquire_slab(s, n, slab, object == NULL);
+		if (!object)
 			break;
 
-		if (!object) {
-			*pc->slab = slab;
+		if (!partial) {
+			partial = slab;
+			pc->object = object;
 			stat(s, ALLOC_FROM_PARTIAL);
-			object = t;
 		} else {
 			put_cpu_partial(s, slab, 0);
 			stat(s, CPU_PARTIAL_NODE);
@@ -2324,20 +2326,21 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return object;
+	return partial;
 }
 
 /*
  * Get a slab from somewhere. Search in increasing NUMA distances.
  */
-static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
+static struct slab *get_any_partial(struct kmem_cache *s,
+				    struct partial_context *pc)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
 	struct zoneref *z;
 	struct zone *zone;
 	enum zone_type highest_zoneidx = gfp_zone(pc->flags);
-	void *object;
+	struct slab *slab;
 	unsigned int cpuset_mems_cookie;
 
 	/*
@@ -2372,8 +2375,8 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
 
 		if (n && cpuset_zone_allowed(zone, pc->flags) &&
 				n->nr_partial > s->min_partial) {
-			object = get_partial_node(s, n, pc);
-			if (object) {
+			slab = get_partial_node(s, n, pc);
+			if (slab) {
 				/*
 				 * Don't check read_mems_allowed_retry()
 				 * here - if mems_allowed was updated in
@@ -2381,7 +2384,7 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
 				 * between allocation and the cpuset
 				 * update
 				 */
-				return object;
+				return slab;
 			}
 		}
 	}
@@ -2393,17 +2396,18 @@ static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc)
 /*
  * Get a partial slab, lock it and return it.
  */
-static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc)
+static struct slab *get_partial(struct kmem_cache *s, int node,
+				struct partial_context *pc)
 {
-	void *object;
+	struct slab *slab;
 	int searchnode = node;
 
 	if (node == NUMA_NO_NODE)
 		searchnode = numa_mem_id();
 
-	object = get_partial_node(s, get_node(s, searchnode), pc);
-	if (object || node != NUMA_NO_NODE)
-		return object;
+	slab = get_partial_node(s, get_node(s, searchnode), pc);
+	if (slab || node != NUMA_NO_NODE)
+		return slab;
 
 	return get_any_partial(s, pc);
 }
@@ -3213,10 +3217,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 new_objects:
 
 	pc.flags = gfpflags;
-	pc.slab = &slab;
 	pc.orig_size = orig_size;
-	freelist = get_partial(s, node, &pc);
-	if (freelist) {
+	slab = get_partial(s, node, &pc);
+	if (slab) {
+		freelist = pc.object;
 		if (kmem_cache_debug(s)) {
 			/*
 			 * For debug caches here we had to go through
@@ -3408,12 +3412,11 @@ static void *__slab_alloc_node(struct kmem_cache *s,
 	void *object;
 
 	pc.flags = gfpflags;
-	pc.slab = &slab;
 	pc.orig_size = orig_size;
-	object = get_partial(s, node, &pc);
+	slab = get_partial(s, node, &pc);
 
-	if (object)
-		return object;
+	if (slab)
+		return pc.object;
 
 	slab = new_slab(s, gfpflags, node);
 	if (unlikely(!slab)) {
-- 
2.20.1
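
For readers following the series, a minimal caller-side sketch of the
convention change, distilled from the ___slab_alloc() hunk above; this
is illustrative only, with locking, stats and the debug path omitted:

	/* Before this patch: get_partial() returned the freelist, and
	 * the slab came back through a pointer stashed in the context.
	 */
	pc.flags = gfpflags;
	pc.slab = &slab;
	pc.orig_size = orig_size;
	freelist = get_partial(s, node, &pc);

	/* After this patch: get_partial() returns the slab itself, and
	 * the freelist (or object) comes back through pc.object.
	 */
	pc.flags = gfpflags;
	pc.orig_size = orig_size;
	slab = get_partial(s, node, &pc);
	if (slab)
		freelist = pc.object;

Returning the slab lets later patches in the series key further
decisions off the slab itself, while pc.object keeps the old freelist
result available to existing callers.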