Verify that KASAN does not quarantine objects in SLAB_TYPESAFE_BY_RCU slabs
if CONFIG_SLUB_RCU_DEBUG is off.
Signed-off-by: Jann Horn <jannh@google.com>
---
changes in v2:
- disable migration to ensure that all SLUB operations use the same
percpu state (vbabka)
- use EXPECT instead of ASSERT for pointer equality check so that
expectation failure doesn't terminate the test with migration still
disabled
---
mm/kasan/kasan_test_c.c | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c
index 5f922dd38ffa..0d50402d492c 100644
--- a/mm/kasan/kasan_test_c.c
+++ b/mm/kasan/kasan_test_c.c
@@ -1073,6 +1073,43 @@ static void kmem_cache_rcu_uaf(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+/*
+ * Check that SLAB_TYPESAFE_BY_RCU objects are immediately reused when
+ * CONFIG_SLUB_RCU_DEBUG is off, and stay at the same address.
+ */
+static void kmem_cache_rcu_reuse(struct kunit *test)
+{
+	char *p, *p2;
+	struct kmem_cache *cache;
+
+	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_SLUB_RCU_DEBUG);
+
+	cache = kmem_cache_create("test_cache", 16, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	migrate_disable();
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		goto out;
+	}
+
+	kmem_cache_free(cache, p);
+	p2 = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p2) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		goto out;
+	}
+	KUNIT_EXPECT_PTR_EQ(test, p, p2);
+
+	kmem_cache_free(cache, p2);
+
+out:
+	migrate_enable();
+	kmem_cache_destroy(cache);
+}
+
 static void kmem_cache_double_destroy(struct kunit *test)
 {
 	struct kmem_cache *cache;
@@ -2098,6 +2135,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
 	KUNIT_CASE(kmem_cache_rcu_uaf),
+	KUNIT_CASE(kmem_cache_rcu_reuse),
 	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),
---
base-commit: 0df7d6c9705b283d5b71ee0ae86ead05bd3a55a9
change-id: 20250728-kasan-tsbrcu-noquarantine-test-5c723367e056
prerequisite-change-id: 20250723-kasan-tsbrcu-noquarantine-e207bb990e24:v1
prerequisite-patch-id: 4fab9d3a121bfcaacc32a40f606b7c04e0c6fdd0
--
Jann Horn <jannh@google.com>
On Tue, Jul 29, 2025 at 6:49 PM Jann Horn <jannh@google.com> wrote:
> Verify that KASAN does not quarantine objects in SLAB_TYPESAFE_BY_RCU slabs
> if CONFIG_SLUB_RCU_DEBUG is off.
>
> Signed-off-by: Jann Horn <jannh@google.com>
[...]
> +/*
> + * Check that SLAB_TYPESAFE_BY_RCU objects are immediately reused when
> + * CONFIG_SLUB_RCU_DEBUG is off, and stay at the same address.

Would be great to also add an explanation of why we want to test for
this (or a reference to the related fix commit?).

[...]
> +	KUNIT_EXPECT_PTR_EQ(test, p, p2);

I think this might fail for the HW_TAGS mode? The location will be
reused, but the tag will be different. We could mark this test as
Generic mode only, as sketched below.

[...]

Thank you!
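For the Generic-only option, I'd assume the config helper that the
other tests in this file use would be all that's needed; a one-line
sketch, assuming KASAN_TEST_NEEDS_CONFIG_ON() behaves here like in
those tests:

	/* Skip this test unless Generic KASAN is enabled. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);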
On Thu, Aug 14, 2025 at 7:10 AM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> On Tue, Jul 29, 2025 at 6:49 PM Jann Horn <jannh@google.com> wrote:
> > +/*
> > + * Check that SLAB_TYPESAFE_BY_RCU objects are immediately reused when
> > + * CONFIG_SLUB_RCU_DEBUG is off, and stay at the same address.
>
> Would be great to also add an explanation of why we want to test for
> this (or a reference to the related fix commit?).

Okay, I'll add a sentence here, will send v3 in a bit.

> > +	KUNIT_EXPECT_PTR_EQ(test, p, p2);
>
> I think this might fail for the HW_TAGS mode? The location will be
> reused, but the tag will be different.

No, it's a SLAB_TYPESAFE_BY_RCU cache, so the tag can't really be
different. poison_slab_object() will bail out, and assign_tag() will
reuse the already-assigned tag.
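For reference, the relevant logic in assign_tag() in mm/kasan/common.c
goes roughly like this (paraphrased from memory, so treat the details
as approximate rather than as the exact upstream code):

	static inline u8 assign_tag(struct kmem_cache *cache,
				    const void *object, bool init)
	{
		/* Generic KASAN does not use tags at all. */
		if (IS_ENABLED(CONFIG_KASAN_GENERIC))
			return 0xff;

		/*
		 * Caches with neither a constructor nor SLAB_TYPESAFE_BY_RCU
		 * get a fresh random tag on each allocation.
		 */
		if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
			return init ? KASAN_TAG_KERNEL : kasan_random_tag();

		/*
		 * For caches with a constructor or SLAB_TYPESAFE_BY_RCU, the
		 * tag is assigned when the object is first initialized and
		 * then reused for every later allocation, so a freed and
		 * immediately reallocated object keeps its old tag.
		 */
		return init ? kasan_random_tag() : get_tag(object);
	}

So under HW_TAGS the reallocated object should come back with the same
tag as before, and the pointer equality check should hold there too.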
On Thu, Aug 14, 2025 at 5:05 PM Jann Horn <jannh@google.com> wrote:
> > I think this might fail for the HW_TAGS mode? The location will be
> > reused, but the tag will be different.
>
> No, it's a SLAB_TYPESAFE_BY_RCU cache, so the tag can't really be
> different. poison_slab_object() will bail out, and assign_tag() will
> reuse the already-assigned tag.

Ah, right!
On 7/29/25 18:49, Jann Horn wrote:
> Verify that KASAN does not quarantine objects in SLAB_TYPESAFE_BY_RCU slabs
> if CONFIG_SLUB_RCU_DEBUG is off.
>
> Signed-off-by: Jann Horn <jannh@google.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

[...]