[PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG

Jann Horn posted 2 patches 1 month, 3 weeks ago
There is a newer version of this series
[PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by Jann Horn 1 month, 3 weeks ago
Currently, KASAN is unable to catch use-after-free in SLAB_TYPESAFE_BY_RCU
slabs because use-after-free is allowed within the RCU grace period by
design.
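(To illustrate what that means in practice — sketch only, with hypothetical
foo_*() names, not code from this patch — a typical reader in such a cache
does something like:

```c
struct foo {
	unsigned long key;
	refcount_t refcount;
	/* ... */
};

static struct foo *foo_lookup(unsigned long key)
{
	struct foo *obj;

	rcu_read_lock();
	obj = foo_find_locklessly(key); /* may race with kmem_cache_free() */
	if (obj && refcount_inc_not_zero(&obj->refcount)) {
		/* the slot may have been recycled for another key; revalidate */
		if (obj->key != key) {
			foo_put(obj);
			obj = NULL;
		}
	} else {
		obj = NULL;
	}
	rcu_read_unlock();
	return obj;
}
```

so a reader may legitimately dereference an object after it has been passed
to kmem_cache_free(), as long as it revalidates the object afterwards.)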

Add a SLUB debugging feature which RCU-delays every individual
kmem_cache_free() before either actually freeing the object or handing it
off to KASAN, and change KASAN to poison freed objects as normal when this
option is enabled.

For now I've configured Kconfig.debug to default-enable this feature in the
KASAN GENERIC and SW_TAGS modes; I'm not enabling it by default in HW_TAGS
mode because I'm not sure if it might have unwanted performance degradation
effects there.

Note that this is mostly useful with KASAN in the quarantine-based GENERIC
mode; SLAB_TYPESAFE_BY_RCU slabs are basically always also slabs with a
->ctor, and KASAN's assign_tag() currently has to assign fixed tags for
those, reducing the effectiveness of SW_TAGS/HW_TAGS mode.
(A possible future extension of this work would be to also let SLUB call
the ->ctor() on every allocation instead of only when the slab page is
allocated; then tag-based modes would be able to assign new tags on every
reallocation.)

Signed-off-by: Jann Horn <jannh@google.com>
---
 include/linux/kasan.h | 14 ++++++----
 mm/Kconfig.debug      | 29 ++++++++++++++++++++
 mm/kasan/common.c     | 13 +++++----
 mm/kasan/kasan_test.c | 44 +++++++++++++++++++++++++++++
 mm/slab_common.c      | 12 ++++++++
 mm/slub.c             | 76 +++++++++++++++++++++++++++++++++++++++++++++------
 6 files changed, 170 insertions(+), 18 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index ebd93c843e78..c64483d3e2bd 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -186,12 +186,15 @@ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
 }
 
 bool __kasan_slab_free(struct kmem_cache *s, void *object,
-			unsigned long ip, bool init);
+			unsigned long ip, bool init, bool after_rcu_delay);
 static __always_inline bool kasan_slab_free(struct kmem_cache *s,
-						void *object, bool init)
+						void *object, bool init,
+						bool after_rcu_delay)
 {
-	if (kasan_enabled())
-		return __kasan_slab_free(s, object, _RET_IP_, init);
+	if (kasan_enabled()) {
+		return __kasan_slab_free(s, object, _RET_IP_, init,
+				after_rcu_delay);
+	}
 	return false;
 }
 
@@ -387,7 +390,8 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
 	return false;
 }
 
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+				   bool init, bool after_rcu_delay)
 {
 	return false;
 }
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index afc72fde0f03..0c088532f5a7 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -70,6 +70,35 @@ config SLUB_DEBUG_ON
 	  off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
 	  "slab_debug=-".
 
+config SLUB_RCU_DEBUG
+	bool "Make use-after-free detection possible in TYPESAFE_BY_RCU caches"
+	depends on SLUB_DEBUG
+	default KASAN_GENERIC || KASAN_SW_TAGS
+	help
+	  Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
+	  was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
+	  kfree_rcu() instead.
+
+	  This is intended for use in combination with KASAN, to enable KASAN to
+	  detect use-after-free accesses in such caches.
+	  (KFENCE is able to do that independent of this flag.)
+
+	  This might degrade performance.
+	  Unfortunately this also prevents a very specific bug pattern from
+	  triggering (insufficient checks against an object being recycled
+	  within the RCU grace period); so this option can be turned off even on
+	  KASAN builds, in case you want to test for such a bug.
+
+	  If you're using this for testing bugs / fuzzing and care about
+	  catching all the bugs WAY more than performance, you might want to
+	  also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
+
+	  WARNING:
+	  This is designed as a debugging feature, not a security feature.
+	  Objects are sometimes recycled without RCU delay under memory pressure.
+
+	  If unsure, say N.
+
 config PAGE_OWNER
 	bool "Track page owner"
 	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7c7fc6ce7eb7..d92cb2e9189d 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -238,7 +238,8 @@ static enum free_validation_result check_slab_free(struct kmem_cache *cache,
 }
 
 static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-				      unsigned long ip, bool init)
+				      unsigned long ip, bool init,
+				      bool after_rcu_delay)
 {
 	void *tagged_object = object;
 	enum free_validation_result valid = check_slab_free(cache, object, ip);
@@ -251,7 +252,8 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 	object = kasan_reset_tag(object);
 
 	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
+	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) &&
+	    !after_rcu_delay)
 		return false;
 
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
@@ -270,7 +272,8 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
 }
 
 bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-				unsigned long ip, bool init)
+				unsigned long ip, bool init,
+				bool after_rcu_delay)
 {
 	if (is_kfence_address(object))
 		return false;
@@ -280,7 +283,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 	 * freelist. The object will thus never be allocated again and its
 	 * metadata will never get released.
 	 */
-	if (poison_slab_object(cache, object, ip, init))
+	if (poison_slab_object(cache, object, ip, init, after_rcu_delay))
 		return true;
 
 	/*
@@ -535,7 +538,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 		return false;
 
 	slab = folio_slab(folio);
-	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+	return !poison_slab_object(slab->slab_cache, ptr, ip, false, false);
 }
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 7b32be2a3cf0..cba782a4b072 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -996,6 +996,49 @@ static void kmem_cache_invalid_free(struct kunit *test)
 	kmem_cache_destroy(cache);
 }
 
+static void kmem_cache_rcu_uaf(struct kunit *test)
+{
+	char *p;
+	size_t size = 200;
+	struct kmem_cache *cache;
+
+	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
+
+	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
+				  NULL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
+
+	p = kmem_cache_alloc(cache, GFP_KERNEL);
+	if (!p) {
+		kunit_err(test, "Allocation failed: %s\n", __func__);
+		kmem_cache_destroy(cache);
+		return;
+	}
+	*p = 1;
+
+	rcu_read_lock();
+
+	/* Free the object - this will internally schedule an RCU callback. */
+	kmem_cache_free(cache, p);
+
+	/* We should still be allowed to access the object at this point because
+	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
+	 * critical section since before the kmem_cache_free().
+	 */
+	READ_ONCE(*p);
+
+	rcu_read_unlock();
+
+	/* Wait for the RCU callback to execute; after this, the object should
+	 * have actually been freed from KASAN's perspective.
+	 */
+	rcu_barrier();
+
+	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
+
+	kmem_cache_destroy(cache);
+}
+
 static void empty_cache_ctor(void *object) { }
 
 static void kmem_cache_double_destroy(struct kunit *test)
@@ -1937,6 +1980,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmem_cache_oob),
 	KUNIT_CASE(kmem_cache_double_free),
 	KUNIT_CASE(kmem_cache_invalid_free),
+	KUNIT_CASE(kmem_cache_rcu_uaf),
 	KUNIT_CASE(kmem_cache_double_destroy),
 	KUNIT_CASE(kmem_cache_accounted),
 	KUNIT_CASE(kmem_cache_bulk),
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1560a1546bb1..19511e34017b 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -450,6 +450,18 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 
 static int shutdown_cache(struct kmem_cache *s)
 {
+	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
+	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
+		/*
+		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
+		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
+		 * defer their freeing with call_rcu().
+		 * Wait for such call_rcu() invocations here before actually
+		 * destroying the cache.
+		 */
+		rcu_barrier();
+	}
+
 	/* free asan quarantined objects */
 	kasan_cache_shutdown(s);
 
diff --git a/mm/slub.c b/mm/slub.c
index 34724704c52d..f44eec209e3e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2144,15 +2144,26 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
+
+struct rcu_delayed_free {
+	struct rcu_head head;
+	void *object;
+};
+#endif
+
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
  *
  * Returns true if freeing of the object can proceed, false if its reuse
- * was delayed by KASAN quarantine, or it was returned to KFENCE.
+ * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
+ * to KFENCE.
  */
 static __always_inline
-bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
+bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
+		    bool after_rcu_delay)
 {
 	kmemleak_free_recursive(x, s->flags);
 	kmsan_slab_free(s, x);
@@ -2163,7 +2174,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 		debug_check_no_obj_freed(x, s->object_size);
 
 	/* Use KCSAN to help debug racy use-after-free. */
-	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
+	if (!(s->flags & SLAB_TYPESAFE_BY_RCU) || after_rcu_delay)
 		__kcsan_check_access(x, s->object_size,
 				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
@@ -2177,6 +2188,28 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 	if (kasan_slab_pre_free(s, x))
 		return false;
 
+#ifdef CONFIG_SLUB_RCU_DEBUG
+	if ((s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay) {
+		struct rcu_delayed_free *delayed_free;
+
+		delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
+		if (delayed_free) {
+			/*
+			 * Let KASAN track our call stack as a "related work
+			 * creation", just like if the object had been freed
+			 * normally via kfree_rcu().
+			 * We have to do this manually because the rcu_head is
+			 * not located inside the object.
+			 */
+			kasan_record_aux_stack_noalloc(x);
+
+			delayed_free->object = x;
+			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
+			return false;
+		}
+	}
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_free and initialization memset's must be
@@ -2200,7 +2233,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 		       s->size - inuse - rsize);
 	}
 	/* KASAN might put x into memory quarantine, delaying its reuse. */
-	return !kasan_slab_free(s, x, init);
+	return !kasan_slab_free(s, x, init, after_rcu_delay);
 }
 
 static __fastpath_inline
@@ -2214,7 +2247,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
 	bool init;
 
 	if (is_kfence_address(next)) {
-		slab_free_hook(s, next, false);
+		slab_free_hook(s, next, false, false);
 		return false;
 	}
 
@@ -2229,7 +2262,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
 		next = get_freepointer(s, object);
 
 		/* If object's reuse doesn't have to be delayed */
-		if (likely(slab_free_hook(s, object, init))) {
+		if (likely(slab_free_hook(s, object, init, false))) {
 			/* Move object to the new freelist */
 			set_freepointer(s, object, *head);
 			*head = object;
@@ -4442,7 +4475,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	memcg_slab_free_hook(s, slab, &object, 1);
 	alloc_tagging_slab_free_hook(s, slab, &object, 1);
 
-	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
 		do_slab_free(s, slab, object, object, 1, addr);
 }
 
@@ -4451,7 +4484,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 static noinline
 void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
 {
-	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
+	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
 		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
 }
 #endif
@@ -4470,6 +4503,33 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
 		do_slab_free(s, slab, head, tail, cnt, addr);
 }
 
+#ifdef CONFIG_SLUB_RCU_DEBUG
+static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
+{
+	struct rcu_delayed_free *delayed_free =
+			container_of(rcu_head, struct rcu_delayed_free, head);
+	void *object = delayed_free->object;
+	struct slab *slab = virt_to_slab(object);
+	struct kmem_cache *s;
+
+	if (WARN_ON(is_kfence_address(rcu_head)))
+		return;
+
+	/* find the object and the cache again */
+	if (WARN_ON(!slab))
+		return;
+	s = slab->slab_cache;
+	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
+		return;
+
+	/* resume freeing */
+	if (!slab_free_hook(s, object, slab_want_init_on_free(s), true))
+		return;
+	do_slab_free(s, slab, object, NULL, 1, _THIS_IP_);
+	kfree(delayed_free);
+}
+#endif /* CONFIG_SLUB_RCU_DEBUG */
+
 #ifdef CONFIG_KASAN_GENERIC
 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
 {

-- 
2.45.2.1089.g2a221341d9-goog
Re: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by kernel test robot 1 month, 2 weeks ago

Hello,

kernel test robot noticed "WARNING:possible_circular_locking_dependency_detected" on:

commit: 17049be0e1bcf0aa8809faf84f3ddd8529cd6c4c ("[PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG")
url: https://github.com/intel-lab-lkp/linux/commits/Jann-Horn/kasan-catch-invalid-free-before-SLUB-reinitializes-the-object/20240726-045709
patch link: https://lore.kernel.org/all/20240725-kasan-tsbrcu-v3-2-51c92f8f1101@google.com/
patch subject: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG

in testcase: rcutorture
version: 
with following parameters:

	runtime: 300s
	test: cpuhotplug
	torture_type: tasks-rude



compiler: clang-18
test machine: qemu-system-x86_64 -enable-kvm -cpu SandyBridge -smp 2 -m 16G

(please refer to attached dmesg/kmsg for entire log/backtrace)



If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <oliver.sang@intel.com>
| Closes: https://lore.kernel.org/oe-lkp/202407291014.2ead1e72-oliver.sang@intel.com


[  136.014616][    C1] WARNING: possible circular locking dependency detected
[  136.014618][    C1] 6.10.0-00002-g17049be0e1bc #1 Not tainted
[  136.014621][    C1] ------------------------------------------------------
[  136.014622][    C1] swapper/1/0 is trying to acquire lock:
[ 136.014625][ C1] ffffffff868f04a0 (console_owner){-.-.}-{0:0}, at: console_flush_all (include/linux/rcupdate.h:? include/linux/srcu.h:232 kernel/printk/printk.c:286 kernel/printk/printk.c:2971) 
[  136.014668][    C1]
[  136.014668][    C1] but task is already holding lock:
[ 136.014670][ C1] ffff888102658518 (&n->list_lock){-.-.}-{2:2}, at: free_to_partial_list (mm/slub.c:?) 
[  136.014685][    C1]
[  136.014685][    C1] which lock already depends on the new lock.
[  136.014685][    C1]
[  136.014687][    C1]
[  136.014687][    C1] the existing dependency chain (in reverse order) is:
[  136.014688][    C1]
[  136.014688][    C1] -> #4 (&n->list_lock){-.-.}-{2:2}:
[ 136.014694][ C1] _raw_spin_lock_irqsave (include/linux/spinlock_api_smp.h:110 kernel/locking/spinlock.c:162) 
[ 136.014704][ C1] ___slab_alloc (mm/slub.c:2717) 
[ 136.014709][ C1] kmem_cache_alloc_noprof (mm/slub.c:3797 mm/slub.c:3850 mm/slub.c:4030 mm/slub.c:4049) 
[ 136.014712][ C1] debug_objects_fill_pool (lib/debugobjects.c:168) 
[ 136.014717][ C1] debug_object_activate (lib/debugobjects.c:492 lib/debugobjects.c:706) 
[ 136.014721][ C1] enqueue_hrtimer (arch/x86/include/asm/jump_label.h:27 include/linux/jump_label.h:207 include/trace/events/timer.h:222 kernel/time/hrtimer.c:479 kernel/time/hrtimer.c:1085) 
[ 136.014726][ C1] hrtimer_start_range_ns (kernel/time/hrtimer.c:1302) 
[ 136.014730][ C1] __enqueue_rt_entity (kernel/sched/rt.c:122) 
[ 136.014735][ C1] enqueue_task_rt (kernel/sched/rt.c:1453) 
[ 136.014738][ C1] __sched_setscheduler (kernel/sched/core.c:?) 
[ 136.014742][ C1] sched_set_fifo (kernel/sched/core.c:8024) 
[ 136.014745][ C1] drm_vblank_worker_init (drivers/gpu/drm/drm_vblank_work.c:?) 
[ 136.014750][ C1] drm_vblank_init (drivers/gpu/drm/drm_vblank.c:555) 
[ 136.014755][ C1] vkms_init (drivers/gpu/drm/vkms/vkms_drv.c:210) 
[ 136.014759][ C1] do_one_initcall (init/main.c:1267) 
[ 136.014762][ C1] do_initcall_level (init/main.c:1328) 
[ 136.014766][ C1] do_initcalls (init/main.c:1342) 
[ 136.014769][ C1] kernel_init_freeable (init/main.c:1582) 
[ 136.014772][ C1] kernel_init (init/main.c:1469) 
[ 136.014776][ C1] ret_from_fork (arch/x86/kernel/process.c:153) 
[ 136.014783][ C1] ret_from_fork_asm (arch/x86/entry/entry_64.S:257) 
[  136.014786][    C1]
[  136.014786][    C1] -> #3 (hrtimer_bases.lock){-.-.}-{2:2}:
[ 136.014792][ C1] _raw_spin_lock_irqsave (include/linux/spinlock_api_smp.h:110 kernel/locking/spinlock.c:162) 
[ 136.014798][ C1] hrtimer_start_range_ns (kernel/time/hrtimer.c:?) 
[ 136.014801][ C1] rpm_suspend (drivers/base/power/runtime.c:?) 
[ 136.014807][ C1] rpm_resume (drivers/base/power/runtime.c:?) 
[ 136.014810][ C1] pm_runtime_work (drivers/base/power/runtime.c:?) 
[ 136.014814][ C1] process_scheduled_works (kernel/workqueue.c:?) 
[ 136.014818][ C1] worker_thread (include/linux/list.h:373 kernel/workqueue.c:947 kernel/workqueue.c:3410) 
[ 136.014820][ C1] kthread (kernel/kthread.c:391) 
[ 136.014824][ C1] ret_from_fork (arch/x86/kernel/process.c:153) 
[ 136.014826][ C1] ret_from_fork_asm (arch/x86/entry/entry_64.S:257) 
[  136.014832][    C1]
[  136.014832][    C1] -> #2 (&dev->power.lock){-.-.}-{2:2}:
[ 136.014837][ C1] _raw_spin_lock_irqsave (include/linux/spinlock_api_smp.h:110 kernel/locking/spinlock.c:162) 
[ 136.014840][ C1] __pm_runtime_resume (drivers/base/power/runtime.c:1171) 
[ 136.014843][ C1] __uart_start (drivers/tty/serial/serial_core.c:149) 
[ 136.014849][ C1] uart_write (include/linux/spinlock.h:406 include/linux/serial_core.h:669 drivers/tty/serial/serial_core.c:634) 
[ 136.014851][ C1] n_tty_write (drivers/tty/n_tty.c:574 drivers/tty/n_tty.c:2389) 
[ 136.014855][ C1] file_tty_write (drivers/tty/tty_io.c:1021) 
[ 136.014859][ C1] do_iter_readv_writev (fs/read_write.c:742) 
[ 136.014864][ C1] vfs_writev (fs/read_write.c:971) 
[ 136.014867][ C1] do_writev (fs/read_write.c:1018) 
[ 136.014870][ C1] __do_fast_syscall_32 (arch/x86/entry/common.c:?) 
[ 136.014874][ C1] do_fast_syscall_32 (arch/x86/entry/common.c:411) 
[ 136.014877][ C1] entry_SYSENTER_compat_after_hwframe (arch/x86/entry/entry_64_compat.S:127) 
[  136.014883][    C1]
[  136.014883][    C1] -> #1 (&port_lock_key){-.-.}-{2:2}:
[ 136.014888][ C1] _raw_spin_lock_irqsave (include/linux/spinlock_api_smp.h:110 kernel/locking/spinlock.c:162) 
[ 136.014891][ C1] serial8250_console_write (include/linux/serial_core.h:? drivers/tty/serial/8250/8250_port.c:3352) 
[ 136.014894][ C1] console_flush_all (kernel/printk/printk.c:2917) 
[ 136.014897][ C1] console_unlock (kernel/printk/printk.c:3048) 
[ 136.014900][ C1] vprintk_emit (kernel/printk/printk.c:?) 
[ 136.014903][ C1] _printk (kernel/printk/printk.c:2376) 
[ 136.014907][ C1] register_console (kernel/printk/printk.c:3537) 
[ 136.014910][ C1] univ8250_console_init (drivers/tty/serial/8250/8250_core.c:?) 
[ 136.014914][ C1] console_init (arch/x86/include/asm/jump_label.h:27 include/linux/jump_label.h:207 include/trace/events/initcall.h:48 kernel/printk/printk.c:3728) 
[ 136.014919][ C1] start_kernel (init/main.c:1039) 
[ 136.014922][ C1] x86_64_start_reservations (??:?) 
[ 136.014926][ C1] x86_64_start_kernel (??:?) 
[ 136.014931][ C1] common_startup_64 (arch/x86/kernel/head_64.S:421) 
[  136.014935][    C1]
[  136.014935][    C1] -> #0 (console_owner){-.-.}-{0:0}:
[ 136.014940][ C1] __lock_acquire (kernel/locking/lockdep.c:3135) 
[ 136.014947][ C1] lock_acquire (kernel/locking/lockdep.c:5754) 
[ 136.014951][ C1] console_flush_all (kernel/printk/printk.c:1873) 
[ 136.014955][ C1] console_unlock (kernel/printk/printk.c:3048) 
[ 136.014958][ C1] vprintk_emit (kernel/printk/printk.c:?) 
[ 136.014961][ C1] _printk (kernel/printk/printk.c:2376) 
[ 136.014964][ C1] slab_bug (mm/slub.c:1030) 
[ 136.014968][ C1] slab_err (mm/slub.c:967 mm/slub.c:1131) 
[ 136.014970][ C1] free_to_partial_list (mm/slub.c:3337) 
[ 136.014974][ C1] slab_free_after_rcu_debug (mm/slub.c:? mm/slub.c:4528) 
[ 136.014978][ C1] rcu_do_batch (include/linux/rcupdate.h:339 kernel/rcu/tree.c:2537) 
[ 136.014984][ C1] rcu_core (kernel/rcu/tree.c:2811) 
[ 136.014987][ C1] handle_softirqs (arch/x86/include/asm/jump_label.h:27 include/linux/jump_label.h:207 include/trace/events/irq.h:142 kernel/softirq.c:555) 
[ 136.014991][ C1] __irq_exit_rcu (kernel/softirq.c:617 kernel/softirq.c:639) 
[ 136.014994][ C1] irq_exit_rcu (kernel/softirq.c:651) 
[ 136.014996][ C1] sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1043) 
[ 136.015000][ C1] asm_sysvec_apic_timer_interrupt (arch/x86/include/asm/idtentry.h:702) 
[ 136.015003][ C1] default_idle (arch/x86/include/asm/irqflags.h:37 arch/x86/include/asm/irqflags.h:72 arch/x86/kernel/process.c:743) 
[ 136.015007][ C1] default_idle_call (include/linux/cpuidle.h:143 kernel/sched/idle.c:118) 
[ 136.015010][ C1] do_idle (kernel/sched/idle.c:? kernel/sched/idle.c:332) 
[ 136.015014][ C1] cpu_startup_entry (kernel/sched/idle.c:429) 
[ 136.015017][ C1] start_secondary (arch/x86/kernel/smpboot.c:313) 
[ 136.015021][ C1] common_startup_64 (arch/x86/kernel/head_64.S:421) 
[  136.015024][    C1]
[  136.015024][    C1] other info that might help us debug this:
[  136.015024][    C1]
[  136.015025][    C1] Chain exists of:
[  136.015025][    C1]   console_owner --> hrtimer_bases.lock --> &n->list_lock
[  136.015025][    C1]
[  136.015031][    C1]  Possible unsafe locking scenario:
[  136.015031][    C1]
[  136.015032][    C1]        CPU0                    CPU1
[  136.015033][    C1]        ----                    ----
[  136.015034][    C1]   lock(&n->list_lock);
[  136.015037][    C1]                                lock(hrtimer_bases.lock);
[  136.015039][    C1]                                lock(&n->list_lock);
[  136.015042][    C1]   lock(console_owner);
[  136.015044][    C1]
[  136.015044][    C1]  *** DEADLOCK ***
[  136.015044][    C1]
[  136.015045][    C1] 4 locks held by swapper/1/0:
[ 136.015048][ C1] #0: ffffffff868fb5a0 (rcu_callback){....}-{0:0}, at: rcu_do_batch (kernel/rcu/tree.c:?) 
[ 136.015057][ C1] #1: ffff888102658518 (&n->list_lock){-.-.}-{2:2}, at: free_to_partial_list (mm/slub.c:?) 
[ 136.015065][ C1] #2: ffffffff864ffd60 (console_lock){+.+.}-{0:0}, at: _printk (kernel/printk/printk.c:2376) 
[ 136.015075][ C1] #3: ffffffff864ff950 (console_srcu){....}-{0:0}, at: console_flush_all (include/linux/rcupdate.h:? include/linux/srcu.h:232 kernel/printk/printk.c:286 kernel/printk/printk.c:2971) 
[  136.015086][    C1]
[  136.015086][    C1] stack backtrace:
[  136.015088][    C1] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.10.0-00002-g17049be0e1bc #1
[  136.015093][    C1] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
[  136.015099][    C1] Call Trace:
[  136.015102][    C1]  <IRQ>
[ 136.015104][ C1] dump_stack_lvl (lib/dump_stack.c:116) 
[ 136.015109][ C1] check_noncircular (kernel/locking/lockdep.c:?) 
[ 136.015115][ C1] __lock_acquire (kernel/locking/lockdep.c:3135) 
[ 136.015126][ C1] lock_acquire (kernel/locking/lockdep.c:5754) 
[ 136.015133][ C1] ? console_flush_all (include/linux/rcupdate.h:? include/linux/srcu.h:232 kernel/printk/printk.c:286 kernel/printk/printk.c:2971) 
[ 136.015138][ C1] ? do_raw_spin_unlock (arch/x86/include/asm/atomic.h:23) 
[ 136.015143][ C1] console_flush_all (kernel/printk/printk.c:1873) 
[ 136.015147][ C1] ? console_flush_all (include/linux/rcupdate.h:? include/linux/srcu.h:232 kernel/printk/printk.c:286 kernel/printk/printk.c:2971) 
[ 136.015152][ C1] ? console_flush_all (include/linux/rcupdate.h:? include/linux/srcu.h:232 kernel/printk/printk.c:286 kernel/printk/printk.c:2971) 
[ 136.015158][ C1] console_unlock (kernel/printk/printk.c:3048) 
[ 136.015163][ C1] vprintk_emit (kernel/printk/printk.c:?) 
[ 136.015168][ C1] _printk (kernel/printk/printk.c:2376) 
[ 136.015173][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105) 
[ 136.015178][ C1] slab_bug (mm/slub.c:1030) 
[ 136.015184][ C1] slab_err (mm/slub.c:967 mm/slub.c:1131) 
[ 136.015192][ C1] free_to_partial_list (mm/slub.c:3337) 
[ 136.015197][ C1] ? slab_free_after_rcu_debug (mm/slub.c:4423 mm/slub.c:4528) 
[ 136.015202][ C1] ? rcu_do_batch (include/linux/rcupdate.h:339 kernel/rcu/tree.c:2537) 
[ 136.015206][ C1] slab_free_after_rcu_debug (mm/slub.c:? mm/slub.c:4528) 
[ 136.015211][ C1] ? rcu_do_batch (kernel/rcu/tree.c:?) 
[ 136.015214][ C1] ? __cfi_slab_free_after_rcu_debug (mm/slub.c:4508) 


The kernel config and materials to reproduce are available at:
https://download.01.org/0day-ci/archive/20240729/202407291014.2ead1e72-oliver.sang@intel.com



-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Re: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by Jann Horn 1 month, 2 weeks ago
On Mon, Jul 29, 2024 at 6:37 AM kernel test robot <oliver.sang@intel.com> wrote:
> kernel test robot noticed "WARNING:possible_circular_locking_dependency_detected" on:
>
> commit: 17049be0e1bcf0aa8809faf84f3ddd8529cd6c4c ("[PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG")
> url: https://github.com/intel-lab-lkp/linux/commits/Jann-Horn/kasan-catch-invalid-free-before-SLUB-reinitializes-the-object/20240726-045709
> patch link: https://lore.kernel.org/all/20240725-kasan-tsbrcu-v3-2-51c92f8f1101@google.com/
> patch subject: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
[...]
> [  136.014616][    C1] WARNING: possible circular locking dependency detected

Looking at the linked dmesg, the primary thing that actually went
wrong here is something in the SLUB bulk freeing code; we got multiple
messages like:

```
 BUG filp (Not tainted): Bulk free expected 1 objects but found 2

 -----------------------------------------------------------------------------

 Slab 0xffffea0005251f00 objects=23 used=23 fp=0x0000000000000000
flags=0x8000000000000040(head|zone=2)
 CPU: 1 PID: 0 Comm: swapper/1 Not tainted 6.10.0-00002-g17049be0e1bc #1
 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.16.2-debian-1.16.2-1 04/01/2014
 Call Trace:
  <IRQ>
  dump_stack_lvl+0xa3/0x100
  slab_err+0x15a/0x200
  free_to_partial_list+0x2c9/0x600
[...]
  slab_free_after_rcu_debug+0x169/0x280
[...]
  rcu_do_batch+0x4a4/0xc40
  rcu_core+0x36e/0x5c0
  handle_softirqs+0x211/0x800
[...]
  __irq_exit_rcu+0x71/0x100
  irq_exit_rcu+0x5/0x80
  sysvec_apic_timer_interrupt+0x68/0x80
  </IRQ>
  <TASK>
  asm_sysvec_apic_timer_interrupt+0x16/0x40
 RIP: 0010:default_idle+0xb/0x40
 Code: 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90
90 90 90 90 90 90 90 90 90 90 eb 07 0f 00 2d 17 ae 32 00 fb f4 <fa> c3
cc cc cc cc cc 66 66 66 66 66 66 2e 0f 1f 84 00 00 00 00 00
 RSP: 0018:ffff888104e5feb8 EFLAGS: 00200282
 RAX: 4c16e5d04752e300 RBX: ffffffff813578df RCX: 0000000000995661
 RDX: 0000000000000001 RSI: 0000000000000004 RDI: ffffffff813578df
 RBP: 0000000000000001 R08: ffff8883aebf6cdb R09: 1ffff11075d7ed9b
 R10: dffffc0000000000 R11: ffffed1075d7ed9c R12: 0000000000000000
 R13: 1ffff110209ca008 R14: ffffffff87474e68 R15: dffffc0000000000
  ? do_idle+0x15f/0x400
  default_idle_call+0x6e/0x100
  do_idle+0x15f/0x400
  cpu_startup_entry+0x40/0x80
  start_secondary+0x129/0x180
  common_startup_64+0x129/0x1a7
  </TASK>
 FIX filp: Object at 0xffff88814947e400 not freed
```

Ah, the issue is that I'm passing NULL as the tail pointer to
do_slab_free() instead of passing in the pointer to the object again.
That's the result of not being careful enough while forward-porting my
patch from last year; it conflicted with vbabka's commit 284f17ac13fe
("mm/slub: handle bulk and single object freeing separately")... I'll
fix that up in the next version.
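
Concretely, the resume-freeing path in slab_free_after_rcu_debug()
should presumably end up looking something like this (passing the object
as both head and tail of a one-element freelist, as the bulk path expects):

```c
	/* resume freeing */
	if (!slab_free_hook(s, object, slab_want_init_on_free(s), true))
		return;
	do_slab_free(s, slab, object, object, 1, _THIS_IP_);
	kfree(delayed_free);
```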


I don't think the lockdep warning is caused by code I introduced; it's
just that you can only hit that warning when SLUB does printk...

> The kernel config and materials to reproduce are available at:
> https://download.01.org/0day-ci/archive/20240729/202407291014.2ead1e72-oliver.sang@intel.com
Re: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by Andrey Konovalov 1 month, 3 weeks ago
On Thu, Jul 25, 2024 at 5:32 PM Jann Horn <jannh@google.com> wrote:
>
> Currently, KASAN is unable to catch use-after-free in SLAB_TYPESAFE_BY_RCU
> slabs because use-after-free is allowed within the RCU grace period by
> design.
>
> Add a SLUB debugging feature which RCU-delays every individual
> kmem_cache_free() before either actually freeing the object or handing it
> off to KASAN, and change KASAN to poison freed objects as normal when this
> option is enabled.
>
> For now I've configured Kconfig.debug to default-enable this feature in the
> KASAN GENERIC and SW_TAGS modes; I'm not enabling it by default in HW_TAGS
> mode because I'm not sure if it might have unwanted performance degradation
> effects there.
>
> Note that this is mostly useful with KASAN in the quarantine-based GENERIC
> mode; SLAB_TYPESAFE_BY_RCU slabs are basically always also slabs with a
> ->ctor, and KASAN's assign_tag() currently has to assign fixed tags for
> those, reducing the effectiveness of SW_TAGS/HW_TAGS mode.
> (A possible future extension of this work would be to also let SLUB call
> the ->ctor() on every allocation instead of only when the slab page is
> allocated; then tag-based modes would be able to assign new tags on every
> reallocation.)
>
> Signed-off-by: Jann Horn <jannh@google.com>

Acked-by: Andrey Konovalov <andreyknvl@gmail.com>

But see some nits below.

Thank you!

> ---
>  include/linux/kasan.h | 14 ++++++----
>  mm/Kconfig.debug      | 29 ++++++++++++++++++++
>  mm/kasan/common.c     | 13 +++++----
>  mm/kasan/kasan_test.c | 44 +++++++++++++++++++++++++++++
>  mm/slab_common.c      | 12 ++++++++
>  mm/slub.c             | 76 +++++++++++++++++++++++++++++++++++++++++++++------
>  6 files changed, 170 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index ebd93c843e78..c64483d3e2bd 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -186,12 +186,15 @@ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
>  }
>
>  bool __kasan_slab_free(struct kmem_cache *s, void *object,
> -                       unsigned long ip, bool init);
> +                       unsigned long ip, bool init, bool after_rcu_delay);
>  static __always_inline bool kasan_slab_free(struct kmem_cache *s,
> -                                               void *object, bool init)
> +                                               void *object, bool init,
> +                                               bool after_rcu_delay)
>  {
> -       if (kasan_enabled())
> -               return __kasan_slab_free(s, object, _RET_IP_, init);
> +       if (kasan_enabled()) {
> +               return __kasan_slab_free(s, object, _RET_IP_, init,
> +                               after_rcu_delay);
> +       }
>         return false;
>  }
>
> @@ -387,7 +390,8 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
>         return false;
>  }
>
> -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
> +static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
> +                                  bool init, bool after_rcu_delay)
>  {
>         return false;
>  }
> diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
> index afc72fde0f03..0c088532f5a7 100644
> --- a/mm/Kconfig.debug
> +++ b/mm/Kconfig.debug
> @@ -70,6 +70,35 @@ config SLUB_DEBUG_ON
>           off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
>           "slab_debug=-".
>
> +config SLUB_RCU_DEBUG
> +       bool "Make use-after-free detection possible in TYPESAFE_BY_RCU caches"

Perhaps, it makes sense to point out that this is related to KASAN's
use-after-free detection in the option description.

> +       depends on SLUB_DEBUG

Do we need depends on KASAN?

> +       default KASAN_GENERIC || KASAN_SW_TAGS
> +       help
> +         Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
> +         was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
> +         kfree_rcu() instead.
> +
> +         This is intended for use in combination with KASAN, to enable KASAN to
> +         detect use-after-free accesses in such caches.
> +         (KFENCE is able to do that independent of this flag.)
> +
> +         This might degrade performance.
> +         Unfortunately this also prevents a very specific bug pattern from
> +         triggering (insufficient checks against an object being recycled
> +         within the RCU grace period); so this option can be turned off even on
> +         KASAN builds, in case you want to test for such a bug.
> +
> +         If you're using this for testing bugs / fuzzing and care about
> +         catching all the bugs WAY more than performance, you might want to
> +         also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
> +
> +         WARNING:
> +         This is designed as a debugging feature, not a security feature.
> +         Objects are sometimes recycled without RCU delay under memory pressure.
> +
> +         If unsure, say N.
> +
>  config PAGE_OWNER
>         bool "Track page owner"
>         depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 7c7fc6ce7eb7..d92cb2e9189d 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -238,7 +238,8 @@ static enum free_validation_result check_slab_free(struct kmem_cache *cache,
>  }
>
>  static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> -                                     unsigned long ip, bool init)
> +                                     unsigned long ip, bool init,
> +                                     bool after_rcu_delay)
>  {
>         void *tagged_object = object;
>         enum free_validation_result valid = check_slab_free(cache, object, ip);
> @@ -251,7 +252,8 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
>         object = kasan_reset_tag(object);
>
>         /* RCU slabs could be legally used after free within the RCU period. */
> -       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> +       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) &&
> +           !after_rcu_delay)

This can be kept on the same line.
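I.e. presumably just:

```c
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay)
		return false;
```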

>                 return false;
>
>         kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
> @@ -270,7 +272,8 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
>  }
>
>  bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> -                               unsigned long ip, bool init)
> +                               unsigned long ip, bool init,
> +                               bool after_rcu_delay)
>  {
>         if (is_kfence_address(object))
>                 return false;
> @@ -280,7 +283,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>          * freelist. The object will thus never be allocated again and its
>          * metadata will never get released.
>          */
> -       if (poison_slab_object(cache, object, ip, init))
> +       if (poison_slab_object(cache, object, ip, init, after_rcu_delay))
>                 return true;
>
>         /*
> @@ -535,7 +538,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
>                 return false;
>
>         slab = folio_slab(folio);
> -       return !poison_slab_object(slab->slab_cache, ptr, ip, false);
> +       return !poison_slab_object(slab->slab_cache, ptr, ip, false, false);
>  }
>
>  void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7b32be2a3cf0..cba782a4b072 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -996,6 +996,49 @@ static void kmem_cache_invalid_free(struct kunit *test)
>         kmem_cache_destroy(cache);
>  }
>
> +static void kmem_cache_rcu_uaf(struct kunit *test)
> +{
> +       char *p;
> +       size_t size = 200;
> +       struct kmem_cache *cache;
> +
> +       KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
> +
> +       cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
> +                                 NULL);
> +       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
> +
> +       p = kmem_cache_alloc(cache, GFP_KERNEL);
> +       if (!p) {
> +               kunit_err(test, "Allocation failed: %s\n", __func__);
> +               kmem_cache_destroy(cache);
> +               return;
> +       }
> +       *p = 1;
> +
> +       rcu_read_lock();
> +
> +       /* Free the object - this will internally schedule an RCU callback. */
> +       kmem_cache_free(cache, p);
> +
> +       /* We should still be allowed to access the object at this point because

Empty line after /* here and below.
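I.e. the usual multi-line comment style, roughly:

```c
	/*
	 * We should still be allowed to access the object at this point
	 * because ...
	 */
```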



> +        * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
> +        * critical section since before the kmem_cache_free().
> +        */
> +       READ_ONCE(*p);
> +
> +       rcu_read_unlock();
> +
> +       /* Wait for the RCU callback to execute; after this, the object should
> +        * have actually been freed from KASAN's perspective.
> +        */
> +       rcu_barrier();
> +
> +       KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
> +
> +       kmem_cache_destroy(cache);
> +}
> +
>  static void empty_cache_ctor(void *object) { }
>
>  static void kmem_cache_double_destroy(struct kunit *test)
> @@ -1937,6 +1980,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
>         KUNIT_CASE(kmem_cache_oob),
>         KUNIT_CASE(kmem_cache_double_free),
>         KUNIT_CASE(kmem_cache_invalid_free),
> +       KUNIT_CASE(kmem_cache_rcu_uaf),
>         KUNIT_CASE(kmem_cache_double_destroy),
>         KUNIT_CASE(kmem_cache_accounted),
>         KUNIT_CASE(kmem_cache_bulk),
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 1560a1546bb1..19511e34017b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -450,6 +450,18 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
>
>  static int shutdown_cache(struct kmem_cache *s)
>  {
> +       if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
> +           (s->flags & SLAB_TYPESAFE_BY_RCU)) {
> +               /*
> +                * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
> +                * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
> +                * defer their freeing with call_rcu().
> +                * Wait for such call_rcu() invocations here before actually
> +                * destroying the cache.
> +                */
> +               rcu_barrier();
> +       }
> +
>         /* free asan quarantined objects */
>         kasan_cache_shutdown(s);
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 34724704c52d..f44eec209e3e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2144,15 +2144,26 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
> +
> +struct rcu_delayed_free {
> +       struct rcu_head head;
> +       void *object;
> +};
> +#endif
> +
>  /*
>   * Hooks for other subsystems that check memory allocations. In a typical
>   * production configuration these hooks all should produce no code at all.
>   *
>   * Returns true if freeing of the object can proceed, false if its reuse
> - * was delayed by KASAN quarantine, or it was returned to KFENCE.
> + * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
> + * to KFENCE.
>   */
>  static __always_inline
> -bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
> +bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
> +                   bool after_rcu_delay)
>  {
>         kmemleak_free_recursive(x, s->flags);
>         kmsan_slab_free(s, x);
> @@ -2163,7 +2174,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>                 debug_check_no_obj_freed(x, s->object_size);
>
>         /* Use KCSAN to help debug racy use-after-free. */
> -       if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
> +       if (!(s->flags & SLAB_TYPESAFE_BY_RCU) || after_rcu_delay)
>                 __kcsan_check_access(x, s->object_size,
>                                      KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
>
> @@ -2177,6 +2188,28 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>         if (kasan_slab_pre_free(s, x))
>                 return false;
>
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +       if ((s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay) {
> +               struct rcu_delayed_free *delayed_free;
> +
> +               delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
> +               if (delayed_free) {
> +                       /*
> +                        * Let KASAN track our call stack as a "related work
> +                        * creation", just like if the object had been freed
> +                        * normally via kfree_rcu().
> +                        * We have to do this manually because the rcu_head is
> +                        * not located inside the object.
> +                        */
> +                       kasan_record_aux_stack_noalloc(x);
> +
> +                       delayed_free->object = x;
> +                       call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
> +                       return false;
> +               }
> +       }
> +#endif /* CONFIG_SLUB_RCU_DEBUG */
> +
>         /*
>          * As memory initialization might be integrated into KASAN,
>          * kasan_slab_free and initialization memset's must be
> @@ -2200,7 +2233,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>                        s->size - inuse - rsize);
>         }
>         /* KASAN might put x into memory quarantine, delaying its reuse. */
> -       return !kasan_slab_free(s, x, init);
> +       return !kasan_slab_free(s, x, init, after_rcu_delay);
>  }
>
>  static __fastpath_inline
> @@ -2214,7 +2247,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
>         bool init;
>
>         if (is_kfence_address(next)) {
> -               slab_free_hook(s, next, false);
> +               slab_free_hook(s, next, false, false);
>                 return false;
>         }
>
> @@ -2229,7 +2262,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
>                 next = get_freepointer(s, object);
>
>                 /* If object's reuse doesn't have to be delayed */
> -               if (likely(slab_free_hook(s, object, init))) {
> +               if (likely(slab_free_hook(s, object, init, false))) {
>                         /* Move object to the new freelist */
>                         set_freepointer(s, object, *head);
>                         *head = object;
> @@ -4442,7 +4475,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>         memcg_slab_free_hook(s, slab, &object, 1);
>         alloc_tagging_slab_free_hook(s, slab, &object, 1);
>
> -       if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> +       if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
>                 do_slab_free(s, slab, object, object, 1, addr);
>  }
>
> @@ -4451,7 +4484,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  static noinline
>  void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
>  {
> -       if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> +       if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
>                 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
>  }
>  #endif
> @@ -4470,6 +4503,33 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
>                 do_slab_free(s, slab, head, tail, cnt, addr);
>  }
>
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
> +{
> +       struct rcu_delayed_free *delayed_free =
> +                       container_of(rcu_head, struct rcu_delayed_free, head);
> +       void *object = delayed_free->object;
> +       struct slab *slab = virt_to_slab(object);
> +       struct kmem_cache *s;
> +
> +       if (WARN_ON(is_kfence_address(rcu_head)))
> +               return;
> +
> +       /* find the object and the cache again */
> +       if (WARN_ON(!slab))
> +               return;
> +       s = slab->slab_cache;
> +       if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
> +               return;
> +
> +       /* resume freeing */
> +       if (!slab_free_hook(s, object, slab_want_init_on_free(s), true))
> +               return;
> +       do_slab_free(s, slab, object, NULL, 1, _THIS_IP_);
> +       kfree(delayed_free);
> +}
> +#endif /* CONFIG_SLUB_RCU_DEBUG */
> +
>  #ifdef CONFIG_KASAN_GENERIC
>  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
>  {
>
> --
> 2.45.2.1089.g2a221341d9-goog
>
Re: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by Jann Horn 1 month, 3 weeks ago
On Fri, Jul 26, 2024 at 2:44 AM Andrey Konovalov <andreyknvl@gmail.com> wrote:
> On Thu, Jul 25, 2024 at 5:32 PM Jann Horn <jannh@google.com> wrote:
> >
> > Currently, KASAN is unable to catch use-after-free in SLAB_TYPESAFE_BY_RCU
> > slabs because use-after-free is allowed within the RCU grace period by
> > design.
> >
> > Add a SLUB debugging feature which RCU-delays every individual
> > kmem_cache_free() before either actually freeing the object or handing it
> > off to KASAN, and change KASAN to poison freed objects as normal when this
> > option is enabled.
[...]
> > diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
> > index afc72fde0f03..0c088532f5a7 100644
> > --- a/mm/Kconfig.debug
> > +++ b/mm/Kconfig.debug
> > @@ -70,6 +70,35 @@ config SLUB_DEBUG_ON
> >           off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
> >           "slab_debug=-".
> >
> > +config SLUB_RCU_DEBUG
> > +       bool "Make use-after-free detection possible in TYPESAFE_BY_RCU caches"
>
> Perhaps, it makes sense to point out that this is related to KASAN's
> use-after-free detection in the option description.

Hmm, yeah, maybe I'll change it to
"Enable UAF detection in TYPESAFE_BY_RCU caches (for KASAN)"
and then we can change that in the future if the feature becomes
usable with other SLUB stuff.

> > +       depends on SLUB_DEBUG
>
> Do we need depends on KASAN?

My original thinking was: The feature is supposed to work basically
independently of KASAN. It doesn't currently do anything useful
without KASAN, but if we do something about constructor slabs in the
future, this should make it possible to let SLUB poison freed objects.
(Though that might also require going back to deterministically
RCU-delaying the freeing of objects in the future...)

But yeah, I guess for now the config option is useless without KASAN,
so it's reasonable to make it depend on KASAN for now. I'll change it
that way.
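
So the entry would presumably end up looking something like this
(sketch only; the exact wording may still change in the next version):

```
config SLUB_RCU_DEBUG
	bool "Enable UAF detection in TYPESAFE_BY_RCU caches (for KASAN)"
	depends on SLUB_DEBUG
	depends on KASAN
	default KASAN_GENERIC || KASAN_SW_TAGS
```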

> > +       default KASAN_GENERIC || KASAN_SW_TAGS
> > +       help
> > +         Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
> > +         was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
> > +         kfree_rcu() instead.
> > +
> > +         This is intended for use in combination with KASAN, to enable KASAN to
> > +         detect use-after-free accesses in such caches.
> > +         (KFENCE is able to do that independent of this flag.)
> > +
> > +         This might degrade performance.
> > +         Unfortunately this also prevents a very specific bug pattern from
> > +         triggering (insufficient checks against an object being recycled
> > +         within the RCU grace period); so this option can be turned off even on
> > +         KASAN builds, in case you want to test for such a bug.
> > +
> > +         If you're using this for testing bugs / fuzzing and care about
> > +         catching all the bugs WAY more than performance, you might want to
> > +         also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
> > +
> > +         WARNING:
> > +         This is designed as a debugging feature, not a security feature.
> > +         Objects are sometimes recycled without RCU delay under memory pressure.
> > +
> > +         If unsure, say N.
> > +
> >  config PAGE_OWNER
> >         bool "Track page owner"
> >         depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
> > diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> > index 7c7fc6ce7eb7..d92cb2e9189d 100644
> > --- a/mm/kasan/common.c
> > +++ b/mm/kasan/common.c
> > @@ -238,7 +238,8 @@ static enum free_validation_result check_slab_free(struct kmem_cache *cache,
> >  }
> >
> >  static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> > -                                     unsigned long ip, bool init)
> > +                                     unsigned long ip, bool init,
> > +                                     bool after_rcu_delay)
> >  {
> >         void *tagged_object = object;
> >         enum free_validation_result valid = check_slab_free(cache, object, ip);
> > @@ -251,7 +252,8 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> >         object = kasan_reset_tag(object);
> >
> >         /* RCU slabs could be legally used after free within the RCU period. */
> > -       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> > +       if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) &&
> > +           !after_rcu_delay)
>
> This can be kept on the same line.

ack, I'll change that

[...]
> > +       /* Free the object - this will internally schedule an RCU callback. */
> > +       kmem_cache_free(cache, p);
> > +
> > +       /* We should still be allowed to access the object at this point because
>
> Empty line after /* here and below.

ack, I'll change that
Re: [PATCH v3 2/2] slub: Introduce CONFIG_SLUB_RCU_DEBUG
Posted by Vlastimil Babka 1 month, 3 weeks ago
On 7/25/24 5:31 PM, Jann Horn wrote:
> Currently, KASAN is unable to catch use-after-free in SLAB_TYPESAFE_BY_RCU
> slabs because use-after-free is allowed within the RCU grace period by
> design.
> 
> Add a SLUB debugging feature which RCU-delays every individual
> kmem_cache_free() before either actually freeing the object or handing it
> off to KASAN, and change KASAN to poison freed objects as normal when this
> option is enabled.
> 
> For now I've configured Kconfig.debug to default-enable this feature in the
> KASAN GENERIC and SW_TAGS modes; I'm not enabling it by default in HW_TAGS
> mode because I'm not sure if it might have unwanted performance degradation
> effects there.
> 
> Note that this is mostly useful with KASAN in the quarantine-based GENERIC
> mode; SLAB_TYPESAFE_BY_RCU slabs are basically always also slabs with a
> ->ctor, and KASAN's assign_tag() currently has to assign fixed tags for
> those, reducing the effectiveness of SW_TAGS/HW_TAGS mode.
> (A possible future extension of this work would be to also let SLUB call
> the ->ctor() on every allocation instead of only when the slab page is
> allocated; then tag-based modes would be able to assign new tags on every
> reallocation.)
> 
> Signed-off-by: Jann Horn <jannh@google.com>

Yeah this "we try but might fail" looks to be a suitable tradeoff for this
debuggin feature in that it keeps the complexity lower. Thanks.

Acked-by: Vlastimil Babka <vbabka@suse.cz> #slab

> ---
>  include/linux/kasan.h | 14 ++++++----
>  mm/Kconfig.debug      | 29 ++++++++++++++++++++
>  mm/kasan/common.c     | 13 +++++----
>  mm/kasan/kasan_test.c | 44 +++++++++++++++++++++++++++++
>  mm/slab_common.c      | 12 ++++++++
>  mm/slub.c             | 76 +++++++++++++++++++++++++++++++++++++++++++++------
>  6 files changed, 170 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index ebd93c843e78..c64483d3e2bd 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -186,12 +186,15 @@ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
>  }
>  
>  bool __kasan_slab_free(struct kmem_cache *s, void *object,
> -			unsigned long ip, bool init);
> +			unsigned long ip, bool init, bool after_rcu_delay);
>  static __always_inline bool kasan_slab_free(struct kmem_cache *s,
> -						void *object, bool init)
> +						void *object, bool init,
> +						bool after_rcu_delay)
>  {
> -	if (kasan_enabled())
> -		return __kasan_slab_free(s, object, _RET_IP_, init);
> +	if (kasan_enabled()) {
> +		return __kasan_slab_free(s, object, _RET_IP_, init,
> +				after_rcu_delay);
> +	}
>  	return false;
>  }
>  
> @@ -387,7 +390,8 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
>  	return false;
>  }
>  
> -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
> +static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
> +				   bool init, bool after_rcu_delay)
>  {
>  	return false;
>  }
> diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
> index afc72fde0f03..0c088532f5a7 100644
> --- a/mm/Kconfig.debug
> +++ b/mm/Kconfig.debug
> @@ -70,6 +70,35 @@ config SLUB_DEBUG_ON
>  	  off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
>  	  "slab_debug=-".
>  
> +config SLUB_RCU_DEBUG
> +	bool "Make use-after-free detection possible in TYPESAFE_BY_RCU caches"
> +	depends on SLUB_DEBUG
> +	default KASAN_GENERIC || KASAN_SW_TAGS
> +	help
> +	  Make SLAB_TYPESAFE_BY_RCU caches behave approximately as if the cache
> +	  was not marked as SLAB_TYPESAFE_BY_RCU and every caller used
> +	  kfree_rcu() instead.
> +
> +	  This is intended for use in combination with KASAN, to enable KASAN to
> +	  detect use-after-free accesses in such caches.
> +	  (KFENCE is able to do that independent of this flag.)
> +
> +	  This might degrade performance.
> +	  Unfortunately this also prevents a very specific bug pattern from
> +	  triggering (insufficient checks against an object being recycled
> +	  within the RCU grace period); so this option can be turned off even on
> +	  KASAN builds, in case you want to test for such a bug.
> +
> +	  If you're using this for testing bugs / fuzzing and care about
> +	  catching all the bugs WAY more than performance, you might want to
> +	  also turn on CONFIG_RCU_STRICT_GRACE_PERIOD.
> +
> +	  WARNING:
> +	  This is designed as a debugging feature, not a security feature.
> +	  Objects are sometimes recycled without RCU delay under memory pressure.
> +
> +	  If unsure, say N.
> +
>  config PAGE_OWNER
>  	bool "Track page owner"
>  	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 7c7fc6ce7eb7..d92cb2e9189d 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -238,7 +238,8 @@ static enum free_validation_result check_slab_free(struct kmem_cache *cache,
>  }
>  
>  static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
> -				      unsigned long ip, bool init)
> +				      unsigned long ip, bool init,
> +				      bool after_rcu_delay)
>  {
>  	void *tagged_object = object;
>  	enum free_validation_result valid = check_slab_free(cache, object, ip);
> @@ -251,7 +252,8 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
>  	object = kasan_reset_tag(object);
>  
>  	/* RCU slabs could be legally used after free within the RCU period. */
> -	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
> +	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU) &&
> +	    !after_rcu_delay)
>  		return false;
>  
>  	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
> @@ -270,7 +272,8 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
>  }
>  
>  bool __kasan_slab_free(struct kmem_cache *cache, void *object,
> -				unsigned long ip, bool init)
> +				unsigned long ip, bool init,
> +				bool after_rcu_delay)
>  {
>  	if (is_kfence_address(object))
>  		return false;
> @@ -280,7 +283,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
>  	 * freelist. The object will thus never be allocated again and its
>  	 * metadata will never get released.
>  	 */
> -	if (poison_slab_object(cache, object, ip, init))
> +	if (poison_slab_object(cache, object, ip, init, after_rcu_delay))
>  		return true;
>  
>  	/*
> @@ -535,7 +538,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
>  		return false;
>  
>  	slab = folio_slab(folio);
> -	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
> +	return !poison_slab_object(slab->slab_cache, ptr, ip, false, false);
>  }
>  
>  void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
> diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
> index 7b32be2a3cf0..cba782a4b072 100644
> --- a/mm/kasan/kasan_test.c
> +++ b/mm/kasan/kasan_test.c
> @@ -996,6 +996,49 @@ static void kmem_cache_invalid_free(struct kunit *test)
>  	kmem_cache_destroy(cache);
>  }
>  
> +static void kmem_cache_rcu_uaf(struct kunit *test)
> +{
> +	char *p;
> +	size_t size = 200;
> +	struct kmem_cache *cache;
> +
> +	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB_RCU_DEBUG);
> +
> +	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
> +				  NULL);
> +	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
> +
> +	p = kmem_cache_alloc(cache, GFP_KERNEL);
> +	if (!p) {
> +		kunit_err(test, "Allocation failed: %s\n", __func__);
> +		kmem_cache_destroy(cache);
> +		return;
> +	}
> +	*p = 1;
> +
> +	rcu_read_lock();
> +
> +	/* Free the object - this will internally schedule an RCU callback. */
> +	kmem_cache_free(cache, p);
> +
> +	/* We should still be allowed to access the object at this point because
> +	 * the cache is SLAB_TYPESAFE_BY_RCU and we've been in an RCU read-side
> +	 * critical section since before the kmem_cache_free().
> +	 */
> +	READ_ONCE(*p);
> +
> +	rcu_read_unlock();
> +
> +	/* Wait for the RCU callback to execute; after this, the object should
> +	 * have actually been freed from KASAN's perspective.
> +	 */
> +	rcu_barrier();
> +
> +	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*p));
> +
> +	kmem_cache_destroy(cache);
> +}
> +
>  static void empty_cache_ctor(void *object) { }
>  
>  static void kmem_cache_double_destroy(struct kunit *test)
> @@ -1937,6 +1980,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
>  	KUNIT_CASE(kmem_cache_oob),
>  	KUNIT_CASE(kmem_cache_double_free),
>  	KUNIT_CASE(kmem_cache_invalid_free),
> +	KUNIT_CASE(kmem_cache_rcu_uaf),
>  	KUNIT_CASE(kmem_cache_double_destroy),
>  	KUNIT_CASE(kmem_cache_accounted),
>  	KUNIT_CASE(kmem_cache_bulk),
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 1560a1546bb1..19511e34017b 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -450,6 +450,18 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
>  
>  static int shutdown_cache(struct kmem_cache *s)
>  {
> +	if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) &&
> +	    (s->flags & SLAB_TYPESAFE_BY_RCU)) {
> +		/*
> +		 * Under CONFIG_SLUB_RCU_DEBUG, when objects in a
> +		 * SLAB_TYPESAFE_BY_RCU slab are freed, SLUB will internally
> +		 * defer their freeing with call_rcu().
> +		 * Wait for such call_rcu() invocations here before actually
> +		 * destroying the cache.
> +		 */
> +		rcu_barrier();
> +	}
> +
>  	/* free asan quarantined objects */
>  	kasan_cache_shutdown(s);
>  
> diff --git a/mm/slub.c b/mm/slub.c
> index 34724704c52d..f44eec209e3e 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2144,15 +2144,26 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
>  }
>  #endif /* CONFIG_MEMCG_KMEM */
>  
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
> +
> +struct rcu_delayed_free {
> +	struct rcu_head head;
> +	void *object;
> +};
> +#endif
> +
>  /*
>   * Hooks for other subsystems that check memory allocations. In a typical
>   * production configuration these hooks all should produce no code at all.
>   *
>   * Returns true if freeing of the object can proceed, false if its reuse
> - * was delayed by KASAN quarantine, or it was returned to KFENCE.
> + * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
> + * to KFENCE.
>   */
>  static __always_inline
> -bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
> +bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
> +		    bool after_rcu_delay)
>  {
>  	kmemleak_free_recursive(x, s->flags);
>  	kmsan_slab_free(s, x);
> @@ -2163,7 +2174,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>  		debug_check_no_obj_freed(x, s->object_size);
>  
>  	/* Use KCSAN to help debug racy use-after-free. */
> -	if (!(s->flags & SLAB_TYPESAFE_BY_RCU))
> +	if (!(s->flags & SLAB_TYPESAFE_BY_RCU) || after_rcu_delay)
>  		__kcsan_check_access(x, s->object_size,
>  				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
>  
> @@ -2177,6 +2188,28 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>  	if (kasan_slab_pre_free(s, x))
>  		return false;
>  
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +	if ((s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay) {
> +		struct rcu_delayed_free *delayed_free;
> +
> +		delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
> +		if (delayed_free) {
> +			/*
> +			 * Let KASAN track our call stack as a "related work
> +			 * creation", just as if the object had been freed
> +			 * normally via kfree_rcu().
> +			 * We have to do this manually because the rcu_head is
> +			 * not located inside the object.
> +			 */
> +			kasan_record_aux_stack_noalloc(x);
> +
> +			delayed_free->object = x;
> +			call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
> +			return false;
> +		}
> +	}
> +#endif /* CONFIG_SLUB_RCU_DEBUG */
> +
>  	/*
>  	 * As memory initialization might be integrated into KASAN,
>  	 * kasan_slab_free and initialization memset's must be
> @@ -2200,7 +2233,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
>  		       s->size - inuse - rsize);
>  	}
>  	/* KASAN might put x into memory quarantine, delaying its reuse. */
> -	return !kasan_slab_free(s, x, init);
> +	return !kasan_slab_free(s, x, init, after_rcu_delay);
>  }
>  
>  static __fastpath_inline
> @@ -2214,7 +2247,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
>  	bool init;
>  
>  	if (is_kfence_address(next)) {
> -		slab_free_hook(s, next, false);
> +		slab_free_hook(s, next, false, false);
>  		return false;
>  	}
>  
> @@ -2229,7 +2262,7 @@ bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
>  		next = get_freepointer(s, object);
>  
>  		/* If object's reuse doesn't have to be delayed */
> -		if (likely(slab_free_hook(s, object, init))) {
> +		if (likely(slab_free_hook(s, object, init, false))) {
>  			/* Move object to the new freelist */
>  			set_freepointer(s, object, *head);
>  			*head = object;
> @@ -4442,7 +4475,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  	memcg_slab_free_hook(s, slab, &object, 1);
>  	alloc_tagging_slab_free_hook(s, slab, &object, 1);
>  
> -	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> +	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
>  		do_slab_free(s, slab, object, object, 1, addr);
>  }
>  
> @@ -4451,7 +4484,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
>  static noinline
>  void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
>  {
> -	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
> +	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
>  		do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
>  }
>  #endif
> @@ -4470,6 +4503,33 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
>  		do_slab_free(s, slab, head, tail, cnt, addr);
>  }
>  
> +#ifdef CONFIG_SLUB_RCU_DEBUG
> +static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
> +{
> +	struct rcu_delayed_free *delayed_free =
> +			container_of(rcu_head, struct rcu_delayed_free, head);
> +	void *object = delayed_free->object;
> +	struct slab *slab = virt_to_slab(object);
> +	struct kmem_cache *s;
> +
> +	kfree(delayed_free);
> +
> +	if (WARN_ON(is_kfence_address(object)))
> +		return;
> +
> +	/* find the object and the cache again */
> +	if (WARN_ON(!slab))
> +		return;
> +	s = slab->slab_cache;
> +	if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
> +		return;
> +
> +	/* resume freeing */
> +	if (slab_free_hook(s, object, slab_want_init_on_free(s), true))
> +		do_slab_free(s, slab, object, object, 1, _THIS_IP_);
> +}
> +#endif /* CONFIG_SLUB_RCU_DEBUG */
> +
>  #ifdef CONFIG_KASAN_GENERIC
>  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
>  {
>
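For readers less familiar with these caches: the "insufficient checks against
an object being recycled within the RCU grace period" caveat in the Kconfig
help text refers to the lockless lookup pattern that SLAB_TYPESAFE_BY_RCU
users have to follow, roughly as documented in include/linux/slab.h. Below is
a minimal sketch of that pattern, with made-up names (struct foo, foo_hash()
and foo_put() are purely hypothetical helpers, not part of this series):

struct foo {
	refcount_t refcnt;
	unsigned long key;
	struct hlist_node node;
};

static struct foo *foo_lookup(unsigned long key)
{
	struct foo *f;

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, foo_hash(key), node) {
		/*
		 * The memory cannot go back to the page allocator while we
		 * are in the RCU read-side critical section, but the object
		 * itself may already have been freed and reused for a
		 * different key ...
		 */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		/*
		 * ... so its identity has to be re-checked after taking a
		 * reference.
		 */
		if (f->key != key) {
			foo_put(f);
			continue;
		}
		rcu_read_unlock();
		return f;
	}
	rcu_read_unlock();
	return NULL;
}

A caller that omits the identity re-check happens to keep working with
SLUB_RCU_DEBUG enabled, because objects are then not recycled until after a
grace period; that is why the help text notes that the option may need to be
turned off on KASAN builds when specifically hunting for that kind of bug.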