[PATCH V2 2/2] lib/tests/slub_kunit: add a test case for {kmalloc,kfree}_nolock

Harry Yoo (Oracle) posted 2 patches 2 hours ago
[PATCH V2 2/2] lib/tests/slub_kunit: add a test case for {kmalloc,kfree}_nolock
Posted by Harry Yoo (Oracle) 2 hours ago
Testing invocation of {kmalloc,kfree}_nolock() during kmalloc() or
kfree() is tricky, and it is even harder to ensure that slowpaths are
properly tested. Lack of such testing has led to late discovery of
the bug fixed by commit a1e244a9f177 ("mm/slab: use prandom if
!allow_spin").

Add a slub_kunit test that allocates and frees objects in a tight loop
while a perf event triggers interrupts (NMI or hardirq depending on
the arch) on the same task, invoking {kmalloc,kfree}_nolock() from the
overflow handler.

Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Harry Yoo (Oracle) <harry@kernel.org>
---
 lib/tests/slub_kunit.c | 92 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c
index 848b682a2d70..fa6d31dbca16 100644
--- a/lib/tests/slub_kunit.c
+++ b/lib/tests/slub_kunit.c
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
 #include <linux/delay.h>
+#include <linux/perf_event.h>
 #include "../mm/slab.h"
 
 static struct kunit_resource resource;
@@ -291,6 +292,94 @@ static void test_krealloc_redzone_zeroing(struct kunit *test)
 	kmem_cache_destroy(s);
 }
 
+#ifdef CONFIG_PERF_EVENTS
+#define NR_ITERATIONS 1000
+#define NR_OBJECTS 1000
+static void *objects[NR_OBJECTS];	/* scratch slots filled/freed by test_kmalloc_kfree_nolock() */
+
+struct test_nolock_context {	/* state shared between the test body and the perf overflow handler */
+	struct kunit *test;	/* owning kunit test; stored at setup but not read by the handler */
+	int callback_count;	/* total overflow-handler invocations */
+	int alloc_ok;		/* kmalloc_nolock() successes inside the handler */
+	int alloc_fail;		/* kmalloc_nolock() failures inside the handler (expected, not fatal) */
+	struct perf_event *event;	/* the counter whose overflow drives the handler */
+};
+
+static struct perf_event_attr hw_attr = {	/* HW cycle counter used to interrupt the allocation loop */
+	.type = PERF_TYPE_HARDWARE,
+	.config = PERF_COUNT_HW_CPU_CYCLES,
+	.size = sizeof(struct perf_event_attr),
+	.pinned = 1,		/* event must always be on the PMU while the task runs */
+	.disabled = 1,		/* created disabled; enabled explicitly once ctx is wired up */
+	.freq = 1,		/* interpret sample_freq as a frequency, not a sample period */
+	.sample_freq = 100000,	/* target ~100k overflow interrupts per second */
+};
+
+static void overflow_handler_test_kmalloc_kfree_nolock(struct perf_event *event,
+						       struct perf_sample_data *data,
+						       struct pt_regs *regs)
+{	/* runs from the event's overflow interrupt (NMI or hardirq depending on arch) */
+	void *objp;
+	gfp_t gfp;
+	struct test_nolock_context *ctx = event->overflow_handler_context;
+
+	/* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */
+	gfp = (ctx->callback_count % 2) ? 0 : __GFP_ACCOUNT;
+	objp = kmalloc_nolock(64, gfp, NUMA_NO_NODE);	/* may fail in this context; tallied below, not fatal */
+
+	if (objp)
+		ctx->alloc_ok++;
+	else
+		ctx->alloc_fail++;
+
+	kfree_nolock(objp);	/* objp may be NULL here; assumes kfree_nolock(NULL) is a no-op like kfree() — confirm */
+	ctx->callback_count++;
+}
+
+static void test_kmalloc_kfree_nolock(struct kunit *test)
+{	/* hammer kmalloc()/kfree() while the overflow handler calls the _nolock variants */
+	int i, j;
+	struct test_nolock_context ctx = { .test = test };
+	struct perf_event *event;
+	bool alloc_fail = false;	/* tracks kmalloc() (not _nolock) failure in the main loop */
+
+	event = perf_event_create_kernel_counter(&hw_attr, -1, current,	/* any CPU, bound to this task */
+						 overflow_handler_test_kmalloc_kfree_nolock,
+						 &ctx);
+	if (IS_ERR(event))
+		kunit_skip(test, "Failed to create perf event");	/* no usable PMU: skip, don't fail */
+	ctx.event = event;
+	perf_event_enable(ctx.event);
+	for (i = 0; i < NR_ITERATIONS; i++) {
+		for (j = 0; j < NR_OBJECTS; j++) {
+			gfp_t gfp = (i % 2) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT;	/* alternate accounted allocs */
+
+			objects[j] = kmalloc(64, gfp);
+			if (!objects[j]) {
+				j--;	/* unwind: free only the slots filled so far */
+				while (j >= 0)
+					kfree(objects[j--]);
+				alloc_fail = true;
+				goto cleanup;
+			}
+		}
+		for (j = 0; j < NR_OBJECTS; j++)
+			kfree(objects[j]);
+	}
+
+cleanup:
+	perf_event_disable(ctx.event);
+	perf_event_release_kernel(ctx.event);	/* handler can no longer fire; ctx counters are stable */
+
+	kunit_info(test, "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n",
+		   ctx.callback_count, ctx.alloc_ok, ctx.alloc_fail);
+
+	if (alloc_fail)
+		kunit_skip(test, "Allocation failed");	/* OOM is not a slab bug; skip rather than fail */
+	KUNIT_EXPECT_EQ(test, 0, slab_errors);	/* slab_errors is reset in test_init() and must stay 0 */
+}
+#endif
+
 static int test_init(struct kunit *test)
 {
 	slab_errors = 0;
@@ -315,6 +404,9 @@ static struct kunit_case test_cases[] = {
 	KUNIT_CASE(test_kfree_rcu_wq_destroy),
 	KUNIT_CASE(test_leak_destroy),
 	KUNIT_CASE(test_krealloc_redzone_zeroing),
+#ifdef CONFIG_PERF_EVENTS
+	KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock),	/* slow: up to NR_ITERATIONS * NR_OBJECTS alloc/free pairs */
+#endif
 	{}
 };
 
-- 
2.43.0