[PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c

Posted by Harry Yoo 3 weeks, 5 days ago
To let ksize() access SLUB's internal implementation details beyond
cache flags, move __ksize(), ksize(), and slab_ksize() to mm/slub.c.

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
 mm/slab.h        | 25 --------------
 mm/slab_common.c | 61 ----------------------------------
 mm/slub.c        | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 86 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 5176c762ec7c..957586d68b3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -665,31 +665,6 @@ void kvfree_rcu_cb(struct rcu_head *head);
 
 size_t __ksize(const void *objp);
 
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->object_size;
-#endif
-	if (s->flags & SLAB_KASAN)
-		return s->object_size;
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
-}
-
 static inline unsigned int large_kmalloc_order(const struct page *page)
 {
 	return page[1].flags.f & 0xff;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c4cf9ed2ec92..aed91fd6fd10 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -983,43 +983,6 @@ void __init create_kmalloc_caches(void)
 						       0, SLAB_NO_MERGE, NULL);
 }
 
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
-{
-	const struct page *page;
-	const struct slab *slab;
-
-	if (unlikely(object == ZERO_SIZE_PTR))
-		return 0;
-
-	page = virt_to_page(object);
-
-	if (unlikely(PageLargeKmalloc(page)))
-		return large_kmalloc_size(page);
-
-	slab = page_slab(page);
-	/* Delete this after we're sure there are no users */
-	if (WARN_ON(!slab))
-		return page_size(page);
-
-#ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(slab->slab_cache, object);
-#endif
-
-	return slab_ksize(slab->slab_cache);
-}
-
 gfp_t kmalloc_fix_flags(gfp_t flags)
 {
 	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
@@ -1235,30 +1198,6 @@ void kfree_sensitive(const void *p)
 }
 EXPORT_SYMBOL(kfree_sensitive);
 
-size_t ksize(const void *objp)
-{
-	/*
-	 * We need to first check that the pointer to the object is valid.
-	 * The KASAN report printed from ksize() is more useful than when
-	 * it's printed later, when the behaviour could be undefined due to
-	 * a potential use-after-free or double-free.
-	 *
-	 * We use kasan_check_byte(), which is supported for the hardware
-	 * tag-based KASAN mode, unlike kasan_check_read/write().
-	 *
-	 * If the pointed to memory is invalid, we return 0 to avoid users of
-	 * ksize() writing to and potentially corrupting the memory region.
-	 *
-	 * We want to perform the check before __ksize(), to avoid potentially
-	 * crashing in __ksize() due to accessing invalid metadata.
-	 */
-	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
-		return 0;
-
-	return kfence_ksize(objp) ?: __ksize(objp);
-}
-EXPORT_SYMBOL(ksize);
-
 #ifdef CONFIG_BPF_SYSCALL
 #include <linux/btf.h>
 
diff --git a/mm/slub.c b/mm/slub.c
index e4a4e01de42f..2b76f352c3b0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6948,6 +6948,92 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->object_size;
+#endif
+	if (s->flags & SLAB_KASAN)
+		return s->object_size;
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @object: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @object in bytes
+ */
+size_t __ksize(const void *object)
+{
+	const struct page *page;
+	const struct slab *slab;
+
+	if (unlikely(object == ZERO_SIZE_PTR))
+		return 0;
+
+	page = virt_to_page(object);
+
+	if (unlikely(PageLargeKmalloc(page)))
+		return large_kmalloc_size(page);
+
+	slab = page_slab(page);
+	/* Delete this after we're sure there are no users */
+	if (WARN_ON(!slab))
+		return page_size(page);
+
+#ifdef CONFIG_SLUB_DEBUG
+	skip_orig_size_check(slab->slab_cache, object);
+#endif
+
+	return slab_ksize(slab->slab_cache);
+}
+
+size_t ksize(const void *objp)
+{
+	/*
+	 * We need to first check that the pointer to the object is valid.
+	 * The KASAN report printed from ksize() is more useful than when
+	 * it's printed later, when the behaviour could be undefined due to
+	 * a potential use-after-free or double-free.
+	 *
+	 * We use kasan_check_byte(), which is supported for the hardware
+	 * tag-based KASAN mode, unlike kasan_check_read/write().
+	 *
+	 * If the pointed to memory is invalid, we return 0 to avoid users of
+	 * ksize() writing to and potentially corrupting the memory region.
+	 *
+	 * We want to perform the check before __ksize(), to avoid potentially
+	 * crashing in __ksize() due to accessing invalid metadata.
+	 */
+	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
+		return 0;
+
+	return kfence_ksize(objp) ?: __ksize(objp);
+}
+EXPORT_SYMBOL(ksize);
+
 static void free_large_kmalloc(struct page *page, void *object)
 {
 	unsigned int order = compound_order(page);
-- 
2.43.0
Re: [PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
Posted by Vlastimil Babka 3 weeks, 5 days ago
On 1/13/26 7:18 AM, Harry Yoo wrote:
> To let ksize() access SLUB's internal implementation details beyond
> cache flags, move __ksize(), ksize(), and slab_ksize() to mm/slub.c.
> 
> Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
> ---
>  mm/slab.h        | 25 --------------
>  mm/slab_common.c | 61 ----------------------------------
>  mm/slub.c        | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 86 insertions(+), 86 deletions(-)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 5176c762ec7c..957586d68b3c 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -665,31 +665,6 @@ void kvfree_rcu_cb(struct rcu_head *head);
>  
>  size_t __ksize(const void *objp);
>  
> -static inline size_t slab_ksize(const struct kmem_cache *s)
> -{
> -#ifdef CONFIG_SLUB_DEBUG
> -	/*
> -	 * Debugging requires use of the padding between object
> -	 * and whatever may come after it.
> -	 */
> -	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
> -		return s->object_size;
> -#endif
> -	if (s->flags & SLAB_KASAN)
> -		return s->object_size;
> -	/*
> -	 * If we have the need to store the freelist pointer
> -	 * back there or track user information then we can
> -	 * only use the space before that information.
> -	 */
> -	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
> -		return s->inuse;
> -	/*
> -	 * Else we can use all the padding etc for the allocation
> -	 */
> -	return s->size;
> -}
> -
>  static inline unsigned int large_kmalloc_order(const struct page *page)
>  {
>  	return page[1].flags.f & 0xff;
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index c4cf9ed2ec92..aed91fd6fd10 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -983,43 +983,6 @@ void __init create_kmalloc_caches(void)
>  						       0, SLAB_NO_MERGE, NULL);
>  }
>  
> -/**
> - * __ksize -- Report full size of underlying allocation
> - * @object: pointer to the object
> - *
> - * This should only be used internally to query the true size of allocations.
> - * It is not meant to be a way to discover the usable size of an allocation
> - * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
> - * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
> - * and/or FORTIFY_SOURCE.
> - *
> - * Return: size of the actual memory used by @object in bytes
> - */
> -size_t __ksize(const void *object)

Think it could also be static and not in slab.h? I'll make that change
locally.
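
For illustration, that change is just internal linkage on the definition
as moved above, with the slab.h declaration dropped alongside it (a
sketch, assuming ksize() stays the only caller):

static size_t __ksize(const void *object)
{
	const struct page *page;
	const struct slab *slab;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	page = virt_to_page(object);

	if (unlikely(PageLargeKmalloc(page)))
		return large_kmalloc_size(page);

	slab = page_slab(page);
	/* Delete this after we're sure there are no users */
	if (WARN_ON(!slab))
		return page_size(page);

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(slab->slab_cache, object);
#endif

	return slab_ksize(slab->slab_cache);
}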
Re: [PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
Posted by Harry Yoo 3 weeks, 5 days ago
On Tue, Jan 13, 2026 at 01:44:45PM +0100, Vlastimil Babka wrote:
> On 1/13/26 7:18 AM, Harry Yoo wrote:
> > To let ksize() access SLUB's internal implementation details beyond
> > cache flags, move __ksize(), ksize(), and slab_ksize() to mm/slub.c.
> > 
> > Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
> > ---
> >  mm/slab.h        | 25 --------------
> >  mm/slab_common.c | 61 ----------------------------------
> >  mm/slub.c        | 86 ++++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 86 insertions(+), 86 deletions(-)
> > 
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 5176c762ec7c..957586d68b3c 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -665,31 +665,6 @@ void kvfree_rcu_cb(struct rcu_head *head);
> >  
> >  size_t __ksize(const void *objp);
> >  
> > -static inline size_t slab_ksize(const struct kmem_cache *s)
> > -{
> > -#ifdef CONFIG_SLUB_DEBUG
> > -	/*
> > -	 * Debugging requires use of the padding between object
> > -	 * and whatever may come after it.
> > -	 */
> > -	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
> > -		return s->object_size;
> > -#endif
> > -	if (s->flags & SLAB_KASAN)
> > -		return s->object_size;
> > -	/*
> > -	 * If we have the need to store the freelist pointer
> > -	 * back there or track user information then we can
> > -	 * only use the space before that information.
> > -	 */
> > -	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
> > -		return s->inuse;
> > -	/*
> > -	 * Else we can use all the padding etc for the allocation
> > -	 */
> > -	return s->size;
> > -}
> > -
> >  static inline unsigned int large_kmalloc_order(const struct page *page)
> >  {
> >  	return page[1].flags.f & 0xff;
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index c4cf9ed2ec92..aed91fd6fd10 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -983,43 +983,6 @@ void __init create_kmalloc_caches(void)
> >  						       0, SLAB_NO_MERGE, NULL);
> >  }
> >  
> > -/**
> > - * __ksize -- Report full size of underlying allocation
> > - * @object: pointer to the object
> > - *
> > - * This should only be used internally to query the true size of allocations.
> > - * It is not meant to be a way to discover the usable size of an allocation
> > - * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
> > - * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
> > - * and/or FORTIFY_SOURCE.
> > - *
> > - * Return: size of the actual memory used by @object in bytes
> > - */
> > -size_t __ksize(const void *object)
> 
> Think it could also be static and not in slab.h? I'll make that change
> locally.

Uh, great. Thanks!

By the way `size_t __ksize(const void *objp);` is in both
include/linux/slab.h and mm/slab.h.
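
So making it static means dropping the declaration in both places:

	size_t __ksize(const void *objp);	/* include/linux/slab.h */
	size_t __ksize(const void *objp);	/* mm/slab.h */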

-- 
Cheers,
Harry / Hyeonggon
Re: [PATCH V6 8/9] mm/slab: move [__]ksize and slab_ksize() to mm/slub.c
Posted by Vlastimil Babka 3 weeks, 5 days ago
On 1/13/26 2:05 PM, Harry Yoo wrote:
> On Tue, Jan 13, 2026 at 01:44:45PM +0100, Vlastimil Babka wrote:
>>>  
>>> -/**
>>> - * __ksize -- Report full size of underlying allocation
>>> - * @object: pointer to the object
>>> - *
>>> - * This should only be used internally to query the true size of allocations.
>>> - * It is not meant to be a way to discover the usable size of an allocation
>>> - * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
>>> - * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
>>> - * and/or FORTIFY_SOURCE.
>>> - *
>>> - * Return: size of the actual memory used by @object in bytes
>>> - */
>>> -size_t __ksize(const void *object)
>>
>> Think it could also be static and not in slab.h? I'll make that change
>> locally.
> 
> Uh, great. Thanks!
> 
> By the way `size_t __ksize(const void *objp);` is in both
> include/linux/slab.h and mm/slab.h.

Ack. Also moving the kerneldoc to ksize() as it should have been there
anyway.
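
Roughly this end state in mm/slub.c (a sketch, not the exact committed
form):

/**
 * ksize -- Report full size of underlying allocation
 * @objp: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
	/*
	 * Check the pointer is valid before __ksize() touches any
	 * metadata; see the comment in the patch above for the full
	 * rationale.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);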