In the near future, slabobj_ext may reside outside the allocated slab
object range within a slab, which could be reported as an out-of-bounds
access by KASAN. To prevent false positives, explicitly disable KASAN
and KMSAN checks when accessing slabobj_ext.
An alternative approach would be to unpoison slabobj_ext, but that
would also hide genuine out-of-bounds accesses coming from outside the
slab allocator, which are more common.
Move the metadata_access_enable()/disable() helpers to mm/slab.h so that
they can be used outside mm/slub.c. Wrap accesses to slabobj_ext metadata
in memcg and alloc_tag code with these helpers.
Call kasan_reset_tag() in slab_obj_ext() before returning the address to
prevent SW or HW tag-based KASAN from reporting false positives.
Suggested-by: Andrey Konovalov <andreyknvl@gmail.com>
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
---
mm/memcontrol.c | 15 ++++++++++++---
mm/slab.h | 24 +++++++++++++++++++++++-
mm/slub.c | 33 +++++++++++++--------------------
3 files changed, 48 insertions(+), 24 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2a9dc246e802..38e6e9099ff5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2570,17 +2570,22 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
struct slabobj_ext *obj_ext;
struct slab *slab;
unsigned int off;
+ struct mem_cgroup *memcg;
slab = folio_slab(folio);
obj_exts = slab_obj_exts(slab);
if (!obj_exts)
return NULL;
+ metadata_access_enable();
off = obj_to_index(slab->slab_cache, slab, p);
obj_ext = slab_obj_ext(slab, obj_exts, off);
- if (obj_ext->objcg)
- return obj_cgroup_memcg(obj_ext->objcg);
-
+ if (obj_ext->objcg) {
+ memcg = obj_cgroup_memcg(obj_ext->objcg);
+ metadata_access_disable();
+ return memcg;
+ }
+ metadata_access_disable();
return NULL;
}
@@ -3197,9 +3202,11 @@ bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
obj_exts = slab_obj_exts(slab);
off = obj_to_index(s, slab, p[i]);
+ metadata_access_enable();
obj_ext = slab_obj_ext(slab, obj_exts, off);
obj_cgroup_get(objcg);
obj_ext->objcg = objcg;
+ metadata_access_disable();
}
return true;
@@ -3210,6 +3217,7 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
{
size_t obj_size = obj_full_size(s);
+ metadata_access_enable();
for (int i = 0; i < objects; i++) {
struct obj_cgroup *objcg;
struct slabobj_ext *obj_ext;
@@ -3226,6 +3234,7 @@ void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
slab_pgdat(slab), cache_vmstat_idx(s));
obj_cgroup_put(objcg);
}
+ metadata_access_disable();
}
/*
diff --git a/mm/slab.h b/mm/slab.h
index 22ee28cb55e1..13f4ca65cb42 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -591,10 +591,14 @@ static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
unsigned long obj_exts,
unsigned int index)
{
+ struct slabobj_ext *obj_ext;
+
VM_WARN_ON_ONCE(!slab_obj_exts(slab));
VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
- return (struct slabobj_ext *)(obj_exts + slab_get_stride(slab) * index);
+ obj_ext = (struct slabobj_ext *)(obj_exts +
+ slab_get_stride(slab) * index);
+ return kasan_reset_tag(obj_ext);
}
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -625,6 +629,24 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
+/*
+ * slub is about to manipulate internal object metadata. This memory lies
+ * outside the range of the allocated object, so accessing it would normally
+ * be reported by kasan as a bounds error. metadata_access_enable() is used
+ * to tell kasan that these accesses are OK.
+ */
+static inline void metadata_access_enable(void)
+{
+ kasan_disable_current();
+ kmsan_disable_current();
+}
+
+static inline void metadata_access_disable(void)
+{
+ kmsan_enable_current();
+ kasan_enable_current();
+}
+
#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
gfp_t flags, size_t size, void **p);
diff --git a/mm/slub.c b/mm/slub.c
index 4383740a4d34..13acc9437ef5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -973,24 +973,6 @@ static slab_flags_t slub_debug;
static char *slub_debug_string;
static int disable_higher_order_debug;
-/*
- * slub is about to manipulate internal object metadata. This memory lies
- * outside the range of the allocated object, so accessing it would normally
- * be reported by kasan as a bounds error. metadata_access_enable() is used
- * to tell kasan that these accesses are OK.
- */
-static inline void metadata_access_enable(void)
-{
- kasan_disable_current();
- kmsan_disable_current();
-}
-
-static inline void metadata_access_disable(void)
-{
- kmsan_enable_current();
- kasan_enable_current();
-}
-
/*
* Object debugging
*/
@@ -2042,9 +2024,11 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
slab_exts, offs);
+ metadata_access_enable();
/* codetag should be NULL */
WARN_ON(ext->ref.ct);
set_codetag_empty(&ext->ref);
+ metadata_access_disable();
}
}
@@ -2245,8 +2229,11 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
* If other users appear then mem_alloc_profiling_enabled()
* check should be added before alloc_tag_add().
*/
- if (likely(obj_ext))
+ if (likely(obj_ext)) {
+ metadata_access_enable();
alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size);
+ metadata_access_disable();
+ }
}
static inline void
@@ -2272,11 +2259,13 @@ __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p
if (!obj_exts)
return;
+ metadata_access_enable();
for (i = 0; i < objects; i++) {
unsigned int off = obj_to_index(s, slab, p[i]);
alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
}
+ metadata_access_disable();
}
static inline void
@@ -2394,8 +2383,12 @@ bool memcg_slab_post_charge(void *p, gfp_t flags)
if (obj_exts) {
off = obj_to_index(s, slab, p);
obj_ext = slab_obj_ext(slab, obj_exts, off);
- if (unlikely(obj_ext->objcg))
+ metadata_access_enable();
+ if (unlikely(obj_ext->objcg)) {
+ metadata_access_disable();
return true;
+ }
+ metadata_access_disable();
}
return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
--
2.43.0
On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
>
> In the near future, slabobj_ext may reside outside the allocated slab
> object range within a slab, which could be reported as an out-of-bounds
> access by KASAN. To prevent false positives, explicitly disable KASAN
> and KMSAN checks when accessing slabobj_ext.
Hmm. This is fragile IMO. Every time someone accesses slabobj_ext,
they should remember to call
metadata_access_enable()/metadata_access_disable().
Have you considered replacing the slab_obj_ext() function with
get_slab_obj_ext()/put_slab_obj_ext()? get_slab_obj_ext() can call
metadata_access_enable() and return slabobj_ext as it does today.
put_slab_obj_ext() will simply call metadata_access_disable(). WDYT?
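
For illustration, a minimal sketch of the suggested pair, building on
the slab_obj_ext() helper from the patch above (the names and semantics
are as proposed in this review, not code from the posted patch):

static inline struct slabobj_ext *get_slab_obj_ext(struct slab *slab,
						   unsigned long obj_exts,
						   unsigned int index)
{
	/* Suppress KASAN/KMSAN checks for the metadata access. */
	metadata_access_enable();
	return slab_obj_ext(slab, obj_exts, index);
}

static inline void put_slab_obj_ext(void)
{
	/* Re-enable the checks; no reference counting is involved. */
	metadata_access_disable();
}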
On Tue, Oct 28, 2025 at 04:03:22PM -0700, Suren Baghdasaryan wrote:
> On Mon, Oct 27, 2025 at 5:29 AM Harry Yoo <harry.yoo@oracle.com> wrote:
> >
> > In the near future, slabobj_ext may reside outside the allocated slab
> > object range within a slab, which could be reported as an out-of-bounds
> > access by KASAN. To prevent false positives, explicitly disable KASAN
> > and KMSAN checks when accessing slabobj_ext.
>
> Hmm. This is fragile IMO. Every time someone accesses slabobj_ext,
> they should remember to call
> metadata_access_enable()/metadata_access_disable().

Good point!

> Have you considered replacing the slab_obj_ext() function with
> get_slab_obj_ext()/put_slab_obj_ext()? get_slab_obj_ext() can call
> metadata_access_enable() and return slabobj_ext as it does today.
> put_slab_obj_ext() will simply call metadata_access_disable(). WDYT?

I did think about it, and I thought introducing get and put helpers
might be misunderstood as doing some kind of reference counting...
but yeah, I'm probably being too paranoid. I'll try this and document
that:

1) users need to use the get and put pair to access slabobj_ext
   metadata, and

2) calling the get and put pair multiple times has no effect.

--
Cheers,
Harry / Hyeonggon
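
For illustration (hypothetical, assuming the get/put naming suggested
above), a call site such as mem_cgroup_from_obj_folio() from the patch
could then drop the early-return juggling:

	struct mem_cgroup *memcg = NULL;
	struct slabobj_ext *obj_ext;
	unsigned int off;

	off = obj_to_index(slab->slab_cache, slab, p);
	obj_ext = get_slab_obj_ext(slab, obj_exts, off);
	if (obj_ext->objcg)
		memcg = obj_cgroup_memcg(obj_ext->objcg);
	put_slab_obj_ext();

	return memcg;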
On Wed, Oct 29, 2025 at 1:06 AM Harry Yoo <harry.yoo@oracle.com> wrote:
>
> On Tue, Oct 28, 2025 at 04:03:22PM -0700, Suren Baghdasaryan wrote:
> > Have you considered replacing the slab_obj_ext() function with
> > get_slab_obj_ext()/put_slab_obj_ext()? get_slab_obj_ext() can call
> > metadata_access_enable() and return slabobj_ext as it does today.
> > put_slab_obj_ext() will simply call metadata_access_disable(). WDYT?
>
> I did think about it, and I thought introducing get and put helpers
> might be misunderstood as doing some kind of reference counting...

Maybe there are better names, but get/put I think are appropriate here.
The get_cpu_ptr()/put_cpu_ptr() pair is a very similar example.

> but yeah, I'm probably being too paranoid. I'll try this and document
> that:
>
> 1) users need to use the get and put pair to access slabobj_ext
>    metadata, and
>
> 2) calling the get and put pair multiple times has no effect.

Yes, I think this would be less error-prone.
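
For reference, the get_cpu_ptr()/put_cpu_ptr() pattern cited above works
the same way: "get" grants temporary access (by disabling preemption)
and "put" ends it, with no reference count involved. The struct and
variable names below are made up for illustration:

#include <linux/percpu.h>

struct my_stats { unsigned long count; };
static DEFINE_PER_CPU(struct my_stats, my_stats_pcpu);

static void my_stats_inc(void)
{
	/* Disables preemption and returns this CPU's instance. */
	struct my_stats *stats = get_cpu_ptr(&my_stats_pcpu);

	stats->count++;
	/* Re-enables preemption; nothing is "released". */
	put_cpu_ptr(&my_stats_pcpu);
}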