Currently, when KASAN is combined with init-on-free behavior, the
initialization happens before KASAN's "invalid free" checks.

More importantly, a subsequent commit will want to use the object metadata
region to store an rcu_head, and we should let KASAN check that the object
pointer is valid before that. (Otherwise that change will make the existing
testcase kmem_cache_invalid_free fail.)
So add a new KASAN hook that allows KASAN to pre-validate a
kmem_cache_free() operation before SLUB actually starts modifying the
object or its metadata.
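
To illustrate where the new hook sits, slab_free_hook() afterwards looks
roughly like this (simplified sketch with unrelated debugging hooks elided;
see the mm/slub.c hunk below for the actual change):

  static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x,
                                             bool init)
  {
          /* ... kmemleak / debug hooks elided ... */

          if (kfence_free(x))
                  return false;

          /*
           * New: reject an invalid pointer before anything below writes to
           * the object (init-on-free memset, metadata reuse).
           */
          if (kasan_slab_pre_free(s, x))
                  return false;

          if (unlikely(init)) {
                  /* ... init-on-free memset of the object ... */
          }

          /* KASAN poisoning; may also take over the init memset. */
          return !kasan_slab_free(s, x, init);
  }
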
Signed-off-by: Jann Horn <jannh@google.com>
---
 include/linux/kasan.h | 10 ++++++++++
 mm/kasan/common.c     | 51 +++++++++++++++++++++++++++++++++++++++------------
 mm/slub.c             |  7 +++++++
 3 files changed, 56 insertions(+), 12 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 70d6a8f6e25d..eee8ca1dcb40 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -175,6 +175,16 @@ static __always_inline void * __must_check kasan_init_slab_obj(
         return (void *)object;
 }
 
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+                           unsigned long ip);
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+                                                void *object)
+{
+        if (kasan_enabled())
+                return __kasan_slab_pre_free(s, object, _RET_IP_);
+        return false;
+}
+
 bool __kasan_slab_free(struct kmem_cache *s, void *object,
                        unsigned long ip, bool init);
 static __always_inline bool kasan_slab_free(struct kmem_cache *s,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 85e7c6b4575c..7c7fc6ce7eb7 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -208,31 +208,52 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
         return (void *)object;
 }
 
-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-                                      unsigned long ip, bool init)
+enum free_validation_result {
+        KASAN_FREE_IS_IGNORED,
+        KASAN_FREE_IS_VALID,
+        KASAN_FREE_IS_INVALID
+};
+
+static enum free_validation_result check_slab_free(struct kmem_cache *cache,
+                                                   void *object, unsigned long ip)
 {
-        void *tagged_object;
+        void *tagged_object = object;
 
-        if (!kasan_arch_is_ready())
-                return false;
+        if (is_kfence_address(object) || !kasan_arch_is_ready())
+                return KASAN_FREE_IS_IGNORED;
 
-        tagged_object = object;
         object = kasan_reset_tag(object);
 
         if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
                 kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
-                return true;
+                return KASAN_FREE_IS_INVALID;
         }
 
-        /* RCU slabs could be legally used after free within the RCU period. */
-        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-                return false;
-
         if (!kasan_byte_accessible(tagged_object)) {
                 kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
-                return true;
+                return KASAN_FREE_IS_INVALID;
         }
 
+        return KASAN_FREE_IS_VALID;
+}
+
+static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
+                                      unsigned long ip, bool init)
+{
+        void *tagged_object = object;
+        enum free_validation_result valid = check_slab_free(cache, object, ip);
+
+        if (valid == KASAN_FREE_IS_IGNORED)
+                return false;
+        if (valid == KASAN_FREE_IS_INVALID)
+                return true;
+
+        object = kasan_reset_tag(object);
+
+        /* RCU slabs could be legally used after free within the RCU period. */
+        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
+                return false;
+
         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
                      KASAN_SLAB_FREE, init);
 
@@ -242,6 +263,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
         return false;
 }
 
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+                           unsigned long ip)
+{
+        return check_slab_free(cache, object, ip) == KASAN_FREE_IS_INVALID;
+}
+
 bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                        unsigned long ip, bool init)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 4927edec6a8c..34724704c52d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2170,6 +2170,13 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
         if (kfence_free(x))
                 return false;
 
+        /*
+         * Give KASAN a chance to notice an invalid free operation before we
+         * modify the object.
+         */
+        if (kasan_slab_pre_free(s, x))
+                return false;
+
         /*
          * As memory initialization might be integrated into KASAN,
          * kasan_slab_free and initialization memset's must be
--
2.45.2.1089.g2a221341d9-goog

On 7/24/24 6:34 PM, Jann Horn wrote:
> Currently, when KASAN is combined with init-on-free behavior, the
> initialization happens before KASAN's "invalid free" checks.
>
> More importantly, a subsequent commit will want to use the object metadata
> region to store an rcu_head, and we should let KASAN check that the object
> pointer is valid before that. (Otherwise that change will make the existing
> testcase kmem_cache_invalid_free fail.)
>
> So add a new KASAN hook that allows KASAN to pre-validate a
> kmem_cache_free() operation before SLUB actually starts modifying the
> object or its metadata.
>
> Signed-off-by: Jann Horn <jannh@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz> #slub
> [...]

On Wed, 24 Jul 2024 18:34:12 +0200 Jann Horn <jannh@google.com> wrote:
> Currently, when KASAN is combined with init-on-free behavior, the
> initialization happens before KASAN's "invalid free" checks.
> [...]

I added this, to fix the CONFIG_KASAN=n build
--- a/include/linux/kasan.h~kasan-catch-invalid-free-before-slub-reinitializes-the-object-fix
+++ a/include/linux/kasan.h
@@ -381,6 +381,12 @@ static inline void *kasan_init_slab_obj(
 {
         return (void *)object;
 }
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+        return false;
+}
+
 static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
 {
         return false;
_

On Wed, Jul 24, 2024 at 11:17 PM Andrew Morton <akpm@linux-foundation.org> wrote:
> On Wed, 24 Jul 2024 18:34:12 +0200 Jann Horn <jannh@google.com> wrote:
> > [...]
>
> I added this, to fix the CONFIG_KASAN=n build

Whoops, thanks for fixing that up.