[RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper

Harry Yoo posted 7 patches 3 months, 2 weeks ago
[RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Harry Yoo 3 months, 2 weeks ago
Currently, the slab allocator assumes that slab->obj_exts is a pointer
to an array of struct slabobj_ext objects. However, to support storage
methods where struct slabobj_ext is embedded within objects, the slab
allocator should not make this assumption. Instead of directly
dereferencing the slabobj_exts array, abstract access to
struct slabobj_ext via helper functions.

Introduce a new API for slabobj_ext metadata access:

  slab_obj_ext(slab, obj_exts, index) - returns the pointer to
  struct slabobj_ext element at the given index.

Directly dereferencing the return value of slab_obj_exts() is no longer
allowed. Instead, slab_obj_ext() must always be used to access
individual struct slabobj_ext objects.

Convert all users to use these APIs.
No functional changes intended.

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
 mm/memcontrol.c | 23 ++++++++++++++++-------
 mm/slab.h       | 43 ++++++++++++++++++++++++++++++++++++------
 mm/slub.c       | 50 ++++++++++++++++++++++++++++---------------------
 3 files changed, 82 insertions(+), 34 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8dd7fbed5a94..2a9dc246e802 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2566,7 +2566,8 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 	 * slab->obj_exts.
 	 */
 	if (folio_test_slab(folio)) {
-		struct slabobj_ext *obj_exts;
+		unsigned long obj_exts;
+		struct slabobj_ext *obj_ext;
 		struct slab *slab;
 		unsigned int off;
 
@@ -2576,8 +2577,9 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 			return NULL;
 
 		off = obj_to_index(slab->slab_cache, slab, p);
-		if (obj_exts[off].objcg)
-			return obj_cgroup_memcg(obj_exts[off].objcg);
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		if (obj_ext->objcg)
+			return obj_cgroup_memcg(obj_ext->objcg);
 
 		return NULL;
 	}
@@ -3168,6 +3170,9 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 	}
 
 	for (i = 0; i < size; i++) {
+		unsigned long obj_exts;
+		struct slabobj_ext *obj_ext;
+
 		slab = virt_to_slab(p[i]);
 
 		if (!slab_obj_exts(slab) &&
@@ -3190,29 +3195,33 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 					slab_pgdat(slab), cache_vmstat_idx(s)))
 			return false;
 
+		obj_exts = slab_obj_exts(slab);
 		off = obj_to_index(s, slab, p[i]);
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
 		obj_cgroup_get(objcg);
-		slab_obj_exts(slab)[off].objcg = objcg;
+		obj_ext->objcg = objcg;
 	}
 
 	return true;
 }
 
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts)
+			    void **p, int objects, unsigned long obj_exts)
 {
 	size_t obj_size = obj_full_size(s);
 
 	for (int i = 0; i < objects; i++) {
 		struct obj_cgroup *objcg;
+		struct slabobj_ext *obj_ext;
 		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
-		objcg = obj_exts[off].objcg;
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		objcg = obj_ext->objcg;
 		if (!objcg)
 			continue;
 
-		obj_exts[off].objcg = NULL;
+		obj_ext->objcg = NULL;
 		refill_obj_stock(objcg, obj_size, true, -obj_size,
 				 slab_pgdat(slab), cache_vmstat_idx(s));
 		obj_cgroup_put(objcg);
diff --git a/mm/slab.h b/mm/slab.h
index d63cc9b5e313..df2c987d950d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -528,10 +528,12 @@ static inline bool slab_in_kunit_test(void) { return false; }
  * associated with a slab.
  * @slab: a pointer to the slab struct
  *
- * Returns a pointer to the object extension vector associated with the slab,
- * or NULL if no such vector has been associated yet.
+ * Returns the address of the object extension vector associated with the slab,
+ * or zero if no such vector has been associated yet.
+ * Do not dereference the return value directly; use slab_obj_ext() to access
+ * its elements.
  */
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
 {
 	unsigned long obj_exts = READ_ONCE(slab->obj_exts);
 
@@ -544,7 +546,30 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 		       obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
 	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
 #endif
-	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
+
+	return obj_exts & ~OBJEXTS_FLAGS_MASK;
+}
+
+/*
+ * slab_obj_ext - get the pointer to the slab object extension metadata
+ * associated with an object in a slab.
+ * @slab: a pointer to the slab struct
+ * @obj_exts: the address of the object extension vector, as returned by
+ * slab_obj_exts()
+ * @index: an index of the object
+ *
+ * Returns a pointer to the object extension associated with the object.
+ */
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts,
+					       unsigned int index)
+{
+	struct slabobj_ext *obj_ext;
+
+	VM_WARN_ON_ONCE(!slab_obj_exts(slab));
+	VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
+
+	obj_ext = (struct slabobj_ext *)obj_exts;
+	return &obj_ext[index];
 }
 
 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -552,7 +577,13 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 
 #else /* CONFIG_SLAB_OBJ_EXT */
 
-static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
+static inline unsigned long slab_obj_exts(struct slab *slab)
+{
+	/* No object extension vector when CONFIG_SLAB_OBJ_EXT is disabled. */
+	return 0;
+}
+
+/* Stub; signature must match the CONFIG_SLAB_OBJ_EXT variant above. */
+static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
+					       unsigned long obj_exts,
+					       unsigned int index)
 {
 	return NULL;
 }
@@ -569,7 +600,7 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 				  gfp_t flags, size_t size, void **p);
 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-			    void **p, int objects, struct slabobj_ext *obj_exts);
+			    void **p, int objects, unsigned long obj_exts);
 #endif
 
 void kvfree_rcu_cb(struct rcu_head *head);
diff --git a/mm/slub.c b/mm/slub.c
index 64705cb3734f..ae73403f8c29 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2031,7 +2031,7 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long slab_exts;
 	struct slab *obj_exts_slab;
 
 	obj_exts_slab = virt_to_slab(obj_exts);
@@ -2039,9 +2039,12 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	if (slab_exts) {
 		unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
 						 obj_exts_slab, obj_exts);
+		struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+						       slab_exts, offs);
+
 		/* codetag should be NULL */
-		WARN_ON(slab_exts[offs].ref.ct);
-		set_codetag_empty(&slab_exts[offs].ref);
+		WARN_ON(ext->ref.ct);
+		set_codetag_empty(&ext->ref);
 	}
 }
 
@@ -2159,7 +2162,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 
 static inline void free_slab_obj_exts(struct slab *slab)
 {
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
 
 	obj_exts = slab_obj_exts(slab);
 	if (!obj_exts)
@@ -2172,11 +2175,11 @@ static inline void free_slab_obj_exts(struct slab *slab)
 	 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
 	 * the extension for obj_exts is expected to be NULL.
 	 */
-	mark_objexts_empty(obj_exts);
+	mark_objexts_empty((struct slabobj_ext *)obj_exts);
 	if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC))
-		kfree_nolock(obj_exts);
+		kfree_nolock((void *)obj_exts);
 	else
-		kfree(obj_exts);
+		kfree((void *)obj_exts);
 	slab->obj_exts = 0;
 }
 
@@ -2201,9 +2204,10 @@ static inline void free_slab_obj_exts(struct slab *slab)
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 
 static inline struct slabobj_ext *
-prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
+prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p)
 {
 	struct slab *slab;
+	unsigned long obj_exts;
 
 	if (!p)
 		return NULL;
@@ -2215,30 +2219,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 		return NULL;
 
 	slab = virt_to_slab(p);
-	if (!slab_obj_exts(slab) &&
+	obj_exts = slab_obj_exts(slab);
+	if (!obj_exts &&
 	    alloc_slab_obj_exts(slab, s, flags, false)) {
 		pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
 			     __func__, s->name);
 		return NULL;
 	}
 
-	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
+	obj_exts = slab_obj_exts(slab);
+	return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p));
 }
 
 /* Should be called only if mem_alloc_profiling_enabled() */
 static noinline void
 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	struct slabobj_ext *obj_exts;
+	struct slabobj_ext *obj_ext;
 
-	obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
+	obj_ext = prepare_slab_obj_ext_hook(s, flags, object);
 	/*
 	 * Currently obj_exts is used only for allocation profiling.
 	 * If other users appear then mem_alloc_profiling_enabled()
 	 * check should be added before alloc_tag_add().
 	 */
-	if (likely(obj_exts))
-		alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+	if (likely(obj_ext))
+		alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
 }
 
 static inline void
@@ -2253,8 +2259,8 @@ static noinline void
 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			       int objects)
 {
-	struct slabobj_ext *obj_exts;
 	int i;
+	unsigned long obj_exts;
 
 	/* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
 	if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
@@ -2267,7 +2273,7 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
 	for (i = 0; i < objects; i++) {
 		unsigned int off = obj_to_index(s, slab, p[i]);
 
-		alloc_tag_sub(&obj_exts[off].ref, s->size);
+		alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
 	}
 }
 
@@ -2326,7 +2332,7 @@ static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			  int objects)
 {
-	struct slabobj_ext *obj_exts;
+	unsigned long obj_exts;
 
 	if (!memcg_kmem_online())
 		return;
@@ -2341,7 +2347,8 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 static __fastpath_inline
 bool memcg_slab_post_charge(void *p, gfp_t flags)
 {
-	struct slabobj_ext *slab_exts;
+	unsigned long obj_exts;
+	struct slabobj_ext *obj_ext;
 	struct kmem_cache *s;
 	struct folio *folio;
 	struct slab *slab;
@@ -2381,10 +2388,11 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
 		return true;
 
 	/* Ignore already charged objects. */
-	slab_exts = slab_obj_exts(slab);
-	if (slab_exts) {
+	obj_exts = slab_obj_exts(slab);
+	if (obj_exts) {
 		off = obj_to_index(s, slab, p);
-		if (unlikely(slab_exts[off].objcg))
+		obj_ext = slab_obj_ext(slab, obj_exts, off);
+		if (unlikely(obj_ext->objcg))
 			return true;
 	}
 
-- 
2.43.0
Re: [RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Suren Baghdasaryan 3 months, 1 week ago
On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
>
> Currently, the slab allocator assumes that slab->obj_exts is a pointer
> to an array of struct slabobj_ext objects. However, to support storage
> methods where struct slabobj_ext is embedded within objects, the slab
> allocator should not make this assumption. Instead of directly
> dereferencing the slabobj_exts array, abstract access to
> struct slabobj_ext via helper functions.
>
> Introduce a new API slabobj_ext metadata access:
>
>   slab_obj_ext(slab, obj_exts, index) - returns the pointer to
>   struct slabobj_ext element at the given index.
>
> Directly dereferencing the return value of slab_obj_exts() is no longer
> allowed. Instead, slab_obj_ext() must always be used to access
> individual struct slabobj_ext objects.

If direct access to the vector is not allowed, it would be better to
eliminate slab_obj_exts() function completely and use the new
slab_obj_ext() instead. I think that's possible. We might need an
additional `bool is_slab_obj_exts()` helper for an early check before
we calculate the object index but that's quite easy.

>
> Convert all users to use these APIs.
> No functional changes intended.
>
> Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
> ---
>  mm/memcontrol.c | 23 ++++++++++++++++-------
>  mm/slab.h       | 43 ++++++++++++++++++++++++++++++++++++------
>  mm/slub.c       | 50 ++++++++++++++++++++++++++++---------------------
>  3 files changed, 82 insertions(+), 34 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 8dd7fbed5a94..2a9dc246e802 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -2566,7 +2566,8 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
>          * slab->obj_exts.
>          */
>         if (folio_test_slab(folio)) {
> -               struct slabobj_ext *obj_exts;
> +               unsigned long obj_exts;
> +               struct slabobj_ext *obj_ext;
>                 struct slab *slab;
>                 unsigned int off;
>
> @@ -2576,8 +2577,9 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
>                         return NULL;
>
>                 off = obj_to_index(slab->slab_cache, slab, p);
> -               if (obj_exts[off].objcg)
> -                       return obj_cgroup_memcg(obj_exts[off].objcg);
> +               obj_ext = slab_obj_ext(slab, obj_exts, off);
> +               if (obj_ext->objcg)
> +                       return obj_cgroup_memcg(obj_ext->objcg);
>
>                 return NULL;
>         }
> @@ -3168,6 +3170,9 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>         }
>
>         for (i = 0; i < size; i++) {
> +               unsigned long obj_exts;
> +               struct slabobj_ext *obj_ext;
> +
>                 slab = virt_to_slab(p[i]);
>
>                 if (!slab_obj_exts(slab) &&
> @@ -3190,29 +3195,33 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>                                         slab_pgdat(slab), cache_vmstat_idx(s)))
>                         return false;
>
> +               obj_exts = slab_obj_exts(slab);
>                 off = obj_to_index(s, slab, p[i]);
> +               obj_ext = slab_obj_ext(slab, obj_exts, off);
>                 obj_cgroup_get(objcg);
> -               slab_obj_exts(slab)[off].objcg = objcg;
> +               obj_ext->objcg = objcg;
>         }
>
>         return true;
>  }
>
>  void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> -                           void **p, int objects, struct slabobj_ext *obj_exts)
> +                           void **p, int objects, unsigned long obj_exts)
>  {
>         size_t obj_size = obj_full_size(s);
>
>         for (int i = 0; i < objects; i++) {
>                 struct obj_cgroup *objcg;
> +               struct slabobj_ext *obj_ext;
>                 unsigned int off;
>
>                 off = obj_to_index(s, slab, p[i]);
> -               objcg = obj_exts[off].objcg;
> +               obj_ext = slab_obj_ext(slab, obj_exts, off);
> +               objcg = obj_ext->objcg;
>                 if (!objcg)
>                         continue;
>
> -               obj_exts[off].objcg = NULL;
> +               obj_ext->objcg = NULL;
>                 refill_obj_stock(objcg, obj_size, true, -obj_size,
>                                  slab_pgdat(slab), cache_vmstat_idx(s));
>                 obj_cgroup_put(objcg);
> diff --git a/mm/slab.h b/mm/slab.h
> index d63cc9b5e313..df2c987d950d 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -528,10 +528,12 @@ static inline bool slab_in_kunit_test(void) { return false; }
>   * associated with a slab.
>   * @slab: a pointer to the slab struct
>   *
> - * Returns a pointer to the object extension vector associated with the slab,
> - * or NULL if no such vector has been associated yet.
> + * Returns the address of the object extension vector associated with the slab,
> + * or zero if no such vector has been associated yet.
> + * Do not dereference the return value directly; use slab_obj_ext() to access
> + * its elements.
>   */
> -static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> +static inline unsigned long slab_obj_exts(struct slab *slab)
>  {
>         unsigned long obj_exts = READ_ONCE(slab->obj_exts);
>
> @@ -544,7 +546,30 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
>                        obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
>         VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
>  #endif
> -       return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
> +
> +       return obj_exts & ~OBJEXTS_FLAGS_MASK;
> +}
> +
> +/*
> + * slab_obj_ext - get the pointer to the slab object extension metadata
> + * associated with an object in a slab.
> + * @slab: a pointer to the slab struct
> + * @obj_exts: a pointer to the object extension vector
> + * @index: an index of the object
> + *
> + * Returns a pointer to the object extension associated with the object.
> + */
> +static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
> +                                              unsigned long obj_exts,
> +                                              unsigned int index)
> +{
> +       struct slabobj_ext *obj_ext;
> +
> +       VM_WARN_ON_ONCE(!slab_obj_exts(slab));
> +       VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
> +
> +       obj_ext = (struct slabobj_ext *)obj_exts;
> +       return &obj_ext[index];
>  }
>
>  int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> @@ -552,7 +577,13 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
>
>  #else /* CONFIG_SLAB_OBJ_EXT */
>
> -static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
> +static inline unsigned long slab_obj_exts(struct slab *slab)
> +{
> +       return false;
> +}
> +
> +static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
> +                                              unsigned int index)
>  {
>         return NULL;
>  }
> @@ -569,7 +600,7 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
>  bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>                                   gfp_t flags, size_t size, void **p);
>  void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
> -                           void **p, int objects, struct slabobj_ext *obj_exts);
> +                           void **p, int objects, unsigned long obj_exts);
>  #endif
>
>  void kvfree_rcu_cb(struct rcu_head *head);
> diff --git a/mm/slub.c b/mm/slub.c
> index 64705cb3734f..ae73403f8c29 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2031,7 +2031,7 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
>
>  static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
>  {
> -       struct slabobj_ext *slab_exts;
> +       unsigned long slab_exts;
>         struct slab *obj_exts_slab;
>
>         obj_exts_slab = virt_to_slab(obj_exts);
> @@ -2039,9 +2039,12 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
>         if (slab_exts) {
>                 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
>                                                  obj_exts_slab, obj_exts);
> +               struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
> +                                                      slab_exts, offs);
> +
>                 /* codetag should be NULL */
> -               WARN_ON(slab_exts[offs].ref.ct);
> -               set_codetag_empty(&slab_exts[offs].ref);
> +               WARN_ON(ext->ref.ct);
> +               set_codetag_empty(&ext->ref);
>         }
>  }
>
> @@ -2159,7 +2162,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
>
>  static inline void free_slab_obj_exts(struct slab *slab)
>  {
> -       struct slabobj_ext *obj_exts;
> +       unsigned long obj_exts;
>
>         obj_exts = slab_obj_exts(slab);
>         if (!obj_exts)
> @@ -2172,11 +2175,11 @@ static inline void free_slab_obj_exts(struct slab *slab)
>          * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
>          * the extension for obj_exts is expected to be NULL.
>          */
> -       mark_objexts_empty(obj_exts);
> +       mark_objexts_empty((struct slabobj_ext *)obj_exts);
>         if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC))
> -               kfree_nolock(obj_exts);
> +               kfree_nolock((void *)obj_exts);
>         else
> -               kfree(obj_exts);
> +               kfree((void *)obj_exts);
>         slab->obj_exts = 0;
>  }
>
> @@ -2201,9 +2204,10 @@ static inline void free_slab_obj_exts(struct slab *slab)
>  #ifdef CONFIG_MEM_ALLOC_PROFILING
>
>  static inline struct slabobj_ext *
> -prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> +prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p)
>  {
>         struct slab *slab;
> +       unsigned long obj_exts;
>
>         if (!p)
>                 return NULL;
> @@ -2215,30 +2219,32 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
>                 return NULL;
>
>         slab = virt_to_slab(p);
> -       if (!slab_obj_exts(slab) &&
> +       obj_exts = slab_obj_exts(slab);
> +       if (!obj_exts &&
>             alloc_slab_obj_exts(slab, s, flags, false)) {
>                 pr_warn_once("%s, %s: Failed to create slab extension vector!\n",
>                              __func__, s->name);
>                 return NULL;
>         }
>
> -       return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> +       obj_exts = slab_obj_exts(slab);
> +       return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p));
>  }
>
>  /* Should be called only if mem_alloc_profiling_enabled() */
>  static noinline void
>  __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
>  {
> -       struct slabobj_ext *obj_exts;
> +       struct slabobj_ext *obj_ext;
>
> -       obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
> +       obj_ext = prepare_slab_obj_ext_hook(s, flags, object);
>         /*
>          * Currently obj_exts is used only for allocation profiling.
>          * If other users appear then mem_alloc_profiling_enabled()
>          * check should be added before alloc_tag_add().
>          */
> -       if (likely(obj_exts))
> -               alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> +       if (likely(obj_ext))
> +               alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
>  }
>
>  static inline void
> @@ -2253,8 +2259,8 @@ static noinline void
>  __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>                                int objects)
>  {
> -       struct slabobj_ext *obj_exts;
>         int i;
> +       unsigned long obj_exts;
>
>         /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
>         if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
> @@ -2267,7 +2273,7 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
>         for (i = 0; i < objects; i++) {
>                 unsigned int off = obj_to_index(s, slab, p[i]);
>
> -               alloc_tag_sub(&obj_exts[off].ref, s->size);
> +               alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
>         }
>  }
>
> @@ -2326,7 +2332,7 @@ static __fastpath_inline
>  void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>                           int objects)
>  {
> -       struct slabobj_ext *obj_exts;
> +       unsigned long obj_exts;
>
>         if (!memcg_kmem_online())
>                 return;
> @@ -2341,7 +2347,8 @@ void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  static __fastpath_inline
>  bool memcg_slab_post_charge(void *p, gfp_t flags)
>  {
> -       struct slabobj_ext *slab_exts;
> +       unsigned long obj_exts;
> +       struct slabobj_ext *obj_ext;
>         struct kmem_cache *s;
>         struct folio *folio;
>         struct slab *slab;
> @@ -2381,10 +2388,11 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
>                 return true;
>
>         /* Ignore already charged objects. */
> -       slab_exts = slab_obj_exts(slab);
> -       if (slab_exts) {
> +       obj_exts = slab_obj_exts(slab);
> +       if (obj_exts) {
>                 off = obj_to_index(s, slab, p);
> -               if (unlikely(slab_exts[off].objcg))
> +               obj_ext = slab_obj_ext(slab, obj_exts, off);
> +               if (unlikely(obj_ext->objcg))
>                         return true;
>         }
>
> --
> 2.43.0
>
Re: [RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Harry Yoo 3 months, 1 week ago
On Tue, Oct 28, 2025 at 10:55:39AM -0700, Suren Baghdasaryan wrote:
> On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> >
> > Currently, the slab allocator assumes that slab->obj_exts is a pointer
> > to an array of struct slabobj_ext objects. However, to support storage
> > methods where struct slabobj_ext is embedded within objects, the slab
> > allocator should not make this assumption. Instead of directly
> > dereferencing the slabobj_exts array, abstract access to
> > struct slabobj_ext via helper functions.
> >
> > Introduce a new API slabobj_ext metadata access:
> >
> >   slab_obj_ext(slab, obj_exts, index) - returns the pointer to
> >   struct slabobj_ext element at the given index.
> >
> > Directly dereferencing the return value of slab_obj_exts() is no longer
> > allowed. Instead, slab_obj_ext() must always be used to access
> > individual struct slabobj_ext objects.
> 
> If direct access to the vector is not allowed, it would be better to
> eliminate slab_obj_exts() function completely and use the new
> slab_obj_ext() instead. I think that's possible. We might need an
> additional `bool is_slab_obj_exts()` helper for an early check before
> we calculate the object index but that's quite easy.

Good point, but that way we cannot avoid reading slab->obj_exts
multiple times when we access slabobj_ext of multiple objects
as it's accessed via READ_ONCE().

-- 
Cheers,
Harry / Hyeonggon
Re: [RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Suren Baghdasaryan 3 months, 1 week ago
On Wed, Oct 29, 2025 at 1:49 AM Harry Yoo <harry.yoo@oracle.com> wrote:
>
> On Tue, Oct 28, 2025 at 10:55:39AM -0700, Suren Baghdasaryan wrote:
> > On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> > >
> > > Currently, the slab allocator assumes that slab->obj_exts is a pointer
> > > to an array of struct slabobj_ext objects. However, to support storage
> > > methods where struct slabobj_ext is embedded within objects, the slab
> > > allocator should not make this assumption. Instead of directly
> > > dereferencing the slabobj_exts array, abstract access to
> > > struct slabobj_ext via helper functions.
> > >
> > > Introduce a new API slabobj_ext metadata access:
> > >
> > >   slab_obj_ext(slab, obj_exts, index) - returns the pointer to
> > >   struct slabobj_ext element at the given index.
> > >
> > > Directly dereferencing the return value of slab_obj_exts() is no longer
> > > allowed. Instead, slab_obj_ext() must always be used to access
> > > individual struct slabobj_ext objects.
> >
> > If direct access to the vector is not allowed, it would be better to
> > eliminate slab_obj_exts() function completely and use the new
> > slab_obj_ext() instead. I think that's possible. We might need an
> > additional `bool is_slab_obj_exts()` helper for an early check before
> > we calculate the object index but that's quite easy.
>
> Good point, but that way we cannot avoid reading slab->obj_exts
> multiple times when we access slabobj_ext of multiple objects
> as it's accessed via READ_ONCE().

True. I think we use slab->obj_exts to loop over its elements only in
two places: __memcg_slab_post_alloc_hook() and
__memcg_slab_free_hook(). I guess we could implement some kind of
slab_objext_foreach() construct to loop over all elements of
slab->obj_exts?

>
> --
> Cheers,
> Harry / Hyeonggon
Re: [RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Harry Yoo 3 months, 1 week ago
On Wed, Oct 29, 2025 at 08:24:35AM -0700, Suren Baghdasaryan wrote:
> On Wed, Oct 29, 2025 at 1:49 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> >
> > On Tue, Oct 28, 2025 at 10:55:39AM -0700, Suren Baghdasaryan wrote:
> > > On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> > > >
> > > > Currently, the slab allocator assumes that slab->obj_exts is a pointer
> > > > to an array of struct slabobj_ext objects. However, to support storage
> > > > methods where struct slabobj_ext is embedded within objects, the slab
> > > > allocator should not make this assumption. Instead of directly
> > > > dereferencing the slabobj_exts array, abstract access to
> > > > struct slabobj_ext via helper functions.
> > > >
> > > > Introduce a new API slabobj_ext metadata access:
> > > >
> > > >   slab_obj_ext(slab, obj_exts, index) - returns the pointer to
> > > >   struct slabobj_ext element at the given index.
> > > >
> > > > Directly dereferencing the return value of slab_obj_exts() is no longer
> > > > allowed. Instead, slab_obj_ext() must always be used to access
> > > > individual struct slabobj_ext objects.
> > >
> > > If direct access to the vector is not allowed, it would be better to
> > > eliminate slab_obj_exts() function completely and use the new
> > > slab_obj_ext() instead. I think that's possible. We might need an
> > > additional `bool is_slab_obj_exts()` helper for an early check before
> > > we calculate the object index but that's quite easy.
> >
> > Good point, but that way we cannot avoid reading slab->obj_exts
> > multiple times when we access slabobj_ext of multiple objects
> > as it's accessed via READ_ONCE().
> 
> True. I think we use slab->obj_exts to loop over its elements only in
> two places: __memcg_slab_post_alloc_hook() and
> __memcg_slab_free_hook(). I guess we could implement some kind of
> slab_objext_foreach() construct to loop over all elements of
> slab->obj_exts?

Not sure if that would help here. In __memcg_slab_free_hook() we want to
iterate only some of (not all of) elements from the same slab
(we know they're from the same slab as we build detached freelist and
sort the array) and so we read slab->obj_exts only once.

In __memcg_slab_post_alloc_hook() we don't know if the objects are from
the same slab, so we read slab->obj_exts multiple times and charge them.

I think we need to either 1) remove slab_obj_exts() and
then introduce is_slab_obj_exts() and see if it has impact on
performance, or 2) keep it as-is.

Thanks!

-- 
Cheers,
Harry / Hyeonggon
Re: [RFC PATCH V3 3/7] mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper
Posted by Suren Baghdasaryan 3 months, 1 week ago
On Wed, Oct 29, 2025 at 6:26 PM Harry Yoo <harry.yoo@oracle.com> wrote:
>
> On Wed, Oct 29, 2025 at 08:24:35AM -0700, Suren Baghdasaryan wrote:
> > On Wed, Oct 29, 2025 at 1:49 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> > >
> > > On Tue, Oct 28, 2025 at 10:55:39AM -0700, Suren Baghdasaryan wrote:
> > > > On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> > > > >
> > > > > Currently, the slab allocator assumes that slab->obj_exts is a pointer
> > > > > to an array of struct slabobj_ext objects. However, to support storage
> > > > > methods where struct slabobj_ext is embedded within objects, the slab
> > > > > allocator should not make this assumption. Instead of directly
> > > > > dereferencing the slabobj_exts array, abstract access to
> > > > > struct slabobj_ext via helper functions.
> > > > >
> > > > > Introduce a new API slabobj_ext metadata access:
> > > > >
> > > > >   slab_obj_ext(slab, obj_exts, index) - returns the pointer to
> > > > >   struct slabobj_ext element at the given index.
> > > > >
> > > > > Directly dereferencing the return value of slab_obj_exts() is no longer
> > > > > allowed. Instead, slab_obj_ext() must always be used to access
> > > > > individual struct slabobj_ext objects.
> > > >
> > > > If direct access to the vector is not allowed, it would be better to
> > > > eliminate slab_obj_exts() function completely and use the new
> > > > slab_obj_ext() instead. I think that's possible. We might need an
> > > > additional `bool is_slab_obj_exts()` helper for an early check before
> > > > we calculate the object index but that's quite easy.
> > >
> > > Good point, but that way we cannot avoid reading slab->obj_exts
> > > multiple times when we access slabobj_ext of multiple objects
> > > as it's accessed via READ_ONCE().
> >
> > True. I think we use slab->obj_exts to loop over its elements only in
> > two places: __memcg_slab_post_alloc_hook() and
> > __memcg_slab_free_hook(). I guess we could implement some kind of
> > slab_objext_foreach() construct to loop over all elements of
> > slab->obj_exts?
>
> Not sure if that would help here. In __memcg_slab_free_hook() we want to
> iterate only some of (not all of) elements from the same slab
> (we know they're from the same slab as we build detached freelist and
> sort the array) and so we read slab->obj_exts only once.
>
> In __memcg_slab_post_alloc_hook() we don't know if the objects are from
> the same slab, so we read slab->obj_exts multiple times and charge them.
>
> I think we need to either 1) remove slab_obj_exts() and
> then introduce is_slab_obj_exts() and see if it has impact on
> performance, or 2) keep it as-is.

Ok, it sounds like too much effort for avoiding a direct accessor.
Let's go with (2) for now.

>
> Thanks!
>
> --
> Cheers,
> Harry / Hyeonggon