[PATCH v2 07/22] mm: KUnit tests for the mermap

Brendan Jackman posted 22 patches 2 weeks ago
[PATCH v2 07/22] mm: KUnit tests for the mermap
Posted by Brendan Jackman 2 weeks ago
Some simple smoke-tests for the mermap. Mainly aiming to test:

1. That there aren't any silly off-by-ones.

2. That the pagetables are not completely broken.

3. That the TLB appears to get flushed basically when expected.

This last point requires a bit of ifdeffery to detect when the flushing
has been performed.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
---
 include/linux/mermap_types.h |   3 +
 mm/Kconfig                   |  11 ++
 mm/Makefile                  |   1 +
 mm/tests/mermap_kunit.c      | 250 +++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 265 insertions(+)

diff --git a/include/linux/mermap_types.h b/include/linux/mermap_types.h
index c1c83b223c28d..13110fcb4c387 100644
--- a/include/linux/mermap_types.h
+++ b/include/linux/mermap_types.h
@@ -24,6 +24,9 @@ struct mermap_cpu {
 	unsigned long next_addr;
 	struct mermap_alloc normal_allocs[3];
 	struct mermap_alloc reserve_alloc;
+#if IS_ENABLED(CONFIG_MERMAP_KUNIT_TEST)
+	u64 tlb_flushes;
+#endif
 };
 
 struct mermap {
diff --git a/mm/Kconfig b/mm/Kconfig
index 2bf1dbcc8cb10..e98db58d515fc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1494,4 +1494,15 @@ config MERMAP
 	help
 	  Support for epheMERal mappings within the kernel.
 
+config MERMAP_KUNIT_TEST
+	tristate "KUnit tests for the mermap" if !KUNIT_ALL_TESTS
+	depends on ARCH_SUPPORTS_MERMAP
+	depends on KUNIT
+	depends on MERMAP
+	default KUNIT_ALL_TESTS
+	help
+	  KUnit test for the mermap.
+
+	  If unsure, say N.
+
 endmenu
diff --git a/mm/Makefile b/mm/Makefile
index 0c45677f4a538..93a1756303cf9 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -151,3 +151,4 @@ obj-$(CONFIG_EXECMEM) += execmem.o
 obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o
 obj-$(CONFIG_LAZY_MMU_MODE_KUNIT_TEST) += tests/lazy_mmu_mode_kunit.o
 obj-$(CONFIG_MERMAP) += mermap.o
+obj-$(CONFIG_MERMAP_KUNIT_TEST) += tests/mermap_kunit.o
diff --git a/mm/tests/mermap_kunit.c b/mm/tests/mermap_kunit.c
new file mode 100644
index 0000000000000..4ac6bce2d75f7
--- /dev/null
+++ b/mm/tests/mermap_kunit.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/cacheflush.h>
+#include <linux/kthread.h>
+#include <linux/mermap.h>
+#include <linux/pgtable.h>
+
+#include <kunit/test.h>
+
+#define NR_NORMAL_ALLOCS ARRAY_SIZE(((struct mm_struct *)NULL)->mermap.cpu->normal_allocs)
+
+KUNIT_DEFINE_ACTION_WRAPPER(__free_page_wrapper, __free_page, struct page *);
+
+static inline struct page *alloc_page_wrapper(struct kunit *test, gfp_t gfp)
+{
+	struct page *page = alloc_page(gfp);
+
+	KUNIT_ASSERT_NOT_NULL(test, page);
+	KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, __free_page_wrapper, page), 0);
+	return page;
+}
+
+KUNIT_DEFINE_ACTION_WRAPPER(mmput_wrapper, mmput, struct mm_struct *);
+
+static inline struct mm_struct *mm_alloc_wrapper(struct kunit *test)
+{
+	struct mm_struct *mm = mm_alloc();
+
+	KUNIT_ASSERT_NOT_NULL(test, mm);
+	KUNIT_ASSERT_EQ(test, kunit_add_action_or_reset(test, mmput_wrapper, mm), 0);
+	return mm;
+}
+
+static inline struct mm_struct *get_mm(struct kunit *test)
+{
+	struct mm_struct *mm = mm_alloc_wrapper(test);
+
+	KUNIT_ASSERT_EQ(test, mermap_mm_prepare(mm), 0);
+	return mm;
+}
+
+struct __mermap_put_args {
+	struct mm_struct *mm;
+	struct mermap_alloc *alloc;
+	unsigned long size;
+};
+
+static inline void __mermap_put_wrapper(void *ctx)
+{
+	struct __mermap_put_args *args = (struct __mermap_put_args *)ctx;
+
+	__mermap_put(args->mm, args->alloc);
+}
+
+/* Call __mermap_get() with use_reserve=false, deal with cleanup. */
+static inline struct __mermap_put_args *
+__mermap_get_wrapper(struct kunit *test, struct mm_struct *mm,
+		     struct page *page, unsigned long size, pgprot_t prot)
+{
+	struct __mermap_put_args *args =
+		kunit_kmalloc(test, sizeof(struct __mermap_put_args), GFP_KERNEL);
+
+	KUNIT_ASSERT_NOT_NULL(test, args);
+	args->mm = mm;
+	args->alloc = __mermap_get(mm, page, size, prot, false);
+	args->size = size;
+
+	if (args->alloc) {
+		int err = kunit_add_action_or_reset(test, __mermap_put_wrapper, args);
+
+		KUNIT_ASSERT_EQ(test, err, 0);
+	}
+
+	return args;
+}
+
+/* Do the cleanup from __mermap_get_wrapper, now. */
+static inline void __mermap_put_early(struct kunit *test, struct __mermap_put_args *args)
+{
+	kunit_release_action(test, __mermap_put_wrapper, args);
+}
+
+static void test_basic_alloc(struct kunit *test)
+{
+	struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+	struct mm_struct *mm = get_mm(test);
+	struct __mermap_put_args *args;
+
+	args = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, args->alloc);
+}
+
+/* Dumb check for off-by-ones. */
+static void test_size(struct kunit *test)
+{
+	struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+	struct __mermap_put_args *full, *large, *small, *fail;
+	struct mm_struct *mm = get_mm(test);
+	unsigned long region_size, large_size;
+	struct mermap_alloc *alloc;
+	int cpu;
+
+	migrate_disable();
+	cpu = raw_smp_processor_id();
+	region_size = mermap_cpu_end(cpu) - mermap_cpu_base(cpu) - PAGE_SIZE;
+	large_size = region_size - PAGE_SIZE;
+
+	/* Allocate whole region at once. */
+	full = __mermap_get_wrapper(test, mm, page, region_size, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, full->alloc);
+	__mermap_put_early(test, full);
+
+	/* Allocate larger than region size. */
+	fail = __mermap_get_wrapper(test, mm, page, region_size + PAGE_SIZE, PAGE_KERNEL);
+	KUNIT_ASSERT_NULL(test, fail->alloc);
+
+	/* Tiptoe up to the edge then past it. */
+	large = __mermap_get_wrapper(test, mm, page, large_size, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, large->alloc);
+	small = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+	KUNIT_ASSERT_NOT_NULL(test, small->alloc);
+	fail = __mermap_get_wrapper(test, mm, page, PAGE_SIZE, PAGE_KERNEL);
+	KUNIT_ASSERT_NULL(test, fail->alloc);
+
+	/* Can still allocate the reserved page. */
+	local_irq_disable();
+	alloc = __mermap_get(mm, page, PAGE_SIZE, PAGE_KERNEL, true);
+	local_irq_enable();
+	KUNIT_ASSERT_NOT_NULL(test, alloc);
+	__mermap_put(mm, alloc);
+	migrate_enable();
+}
+
+static void test_multiple_allocs(struct kunit *test)
+{
+	struct __mermap_put_args *argss[NR_NORMAL_ALLOCS] = { };
+	struct page *pages[NR_NORMAL_ALLOCS + 1];
+	struct mermap_alloc *reserved_alloc;
+	struct mm_struct *mm = get_mm(test);
+	int magic = 0xE4A4;
+
+	for (int i = 0; i < ARRAY_SIZE(pages); i++) {
+		pages[i] = alloc_page_wrapper(test, GFP_KERNEL);
+		WRITE_ONCE(*(int *)page_to_virt(pages[i]), magic + i);
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(argss); i++) {
+		unsigned long base = mermap_cpu_base(raw_smp_processor_id());
+		unsigned long end = mermap_cpu_end(raw_smp_processor_id());
+		unsigned long addr;
+
+		argss[i] = __mermap_get_wrapper(test, mm, pages[i], PAGE_SIZE, PAGE_KERNEL);
+		KUNIT_ASSERT_NOT_NULL_MSG(test, argss[i]->alloc, "alloc %d failed", i);
+
+		addr = (unsigned long) mermap_addr(argss[i]->alloc);
+		KUNIT_EXPECT_GE_MSG(test, addr, base, "alloc %d out of range", i);
+		KUNIT_EXPECT_LT_MSG(test, addr, end, "alloc %d out of range", i);
+	}
+
+	/*
+	 * Read through the mappings to try and detect if they point to the
+	 * pages we wrote earlier.
+	 */
+	kthread_use_mm(mm);
+	for (int i = 0; i < ARRAY_SIZE(pages) - 1; i++) {
+		int *ptr  = (int *)mermap_addr(argss[i]->alloc);
+
+		KUNIT_EXPECT_EQ(test, *ptr, magic + i);
+	}
+
+	/* Run out of alloc structures, only reserved allocs should succeed now. */
+	KUNIT_ASSERT_NULL(test, __mermap_get(mm, pages[NR_NORMAL_ALLOCS],
+					     PAGE_SIZE, PAGE_KERNEL, false));
+	preempt_disable();
+	reserved_alloc = __mermap_get(mm, pages[NR_NORMAL_ALLOCS],
+				      PAGE_SIZE, PAGE_KERNEL, true);
+	KUNIT_EXPECT_NOT_NULL(test, reserved_alloc);
+	/* Also check if this mapping seems correct. */
+	if (reserved_alloc) {
+		int *ptr  = (int *)mermap_addr(reserved_alloc);
+
+		KUNIT_EXPECT_EQ(test, *ptr, magic + NR_NORMAL_ALLOCS);
+
+		mermap_put(reserved_alloc);
+	}
+	preempt_enable();
+
+	kthread_unuse_mm(mm);
+}
+
+static void test_tlb_flushed(struct kunit *test)
+{
+	struct page *page = alloc_page_wrapper(test, GFP_KERNEL);
+	struct mm_struct *mm = get_mm(test);
+	unsigned long addr, prev_addr = 0;
+	/* Avoid running forever in the failure case. */
+	const int max_iters = 1000000;
+	struct mermap_cpu *mc;
+
+	migrate_disable();
+	mc = this_cpu_ptr(mm->mermap.cpu);
+
+	/*
+	 * Allocate until we see an address less than what we had before - assume
+	 * that means a reuse.
+	 */
+	for (int i = 0; i < max_iters; i++) {
+		struct mermap_alloc *alloc;
+
+		/*
+		 * Obviously flushing the TLB already is not wrong per se, but
+		 * it's unexpected and probably means there's some bug.
+		 * Use ASSERT to avoid spamming the log in the failure case.
+		 */
+		KUNIT_ASSERT_EQ_MSG(test, mc->tlb_flushes, 0,
+				    "unexpected flush before alloc %d", i);
+
+		alloc = __mermap_get(mm, page, PAGE_SIZE, PAGE_KERNEL, false);
+		KUNIT_ASSERT_NOT_NULL_MSG(test, alloc, "alloc %d failed", i);
+
+		addr = (unsigned long)mermap_addr(alloc);
+		__mermap_put(mm, alloc);
+		if (addr < prev_addr)
+			break;
+
+		prev_addr = addr;
+		cond_resched();
+	}
+	KUNIT_ASSERT_TRUE_MSG(test, addr < prev_addr, "no address reuse");
+	/* Again, more than one flush isn't wrong per se, but probably a bug. */
+	KUNIT_ASSERT_EQ(test, mc->tlb_flushes, 1);
+
+	migrate_enable();
+}
+
+static struct kunit_case mermap_test_cases[] = {
+	KUNIT_CASE(test_basic_alloc),
+	KUNIT_CASE(test_size),
+	KUNIT_CASE(test_multiple_allocs),
+	KUNIT_CASE(test_tlb_flushed),
+	{}
+};
+
+static struct kunit_suite mermap_test_suite = {
+	.name = "mermap",
+	.test_cases = mermap_test_cases,
+};
+kunit_test_suite(mermap_test_suite);
+
+MODULE_DESCRIPTION("Mermap unit tests");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

-- 
2.51.2
Re: [PATCH v2 07/22] mm: KUnit tests for the mermap
Posted by kernel test robot 1 week, 3 days ago
Hi Brendan,

kernel test robot noticed the following build warnings:

[auto build test WARNING on b5d083a3ed1e2798396d5e491432e887da8d4a06]

url:    https://github.com/intel-lab-lkp/linux/commits/Brendan-Jackman/x86-mm-split-out-preallocate_sub_pgd/20260321-042521
base:   b5d083a3ed1e2798396d5e491432e887da8d4a06
patch link:    https://lore.kernel.org/r/20260320-page_alloc-unmapped-v2-7-28bf1bd54f41%40google.com
patch subject: [PATCH v2 07/22] mm: KUnit tests for the mermap
config: x86_64-randconfig-101-20260324 (https://download.01.org/0day-ci/archive/20260324/202603241512.3kG43FzT-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603241512.3kG43FzT-lkp@intel.com/

cocci warnings: (new ones prefixed by >>)
>> mm/tests/mermap_kunit.c:156:2-3: Unneeded semicolon

vim +156 mm/tests/mermap_kunit.c

   131	
   132	static void test_multiple_allocs(struct kunit *test)
   133	{
   134		struct __mermap_put_args *argss[NR_NORMAL_ALLOCS] = { };
   135		struct page *pages[NR_NORMAL_ALLOCS + 1];
   136		struct mermap_alloc *reserved_alloc;
   137		struct mm_struct *mm = get_mm(test);
   138		int magic = 0xE4A4;
   139	
   140		for (int i = 0; i < ARRAY_SIZE(pages); i++) {
   141			pages[i] = alloc_page_wrapper(test, GFP_KERNEL);
   142			WRITE_ONCE(*(int *)page_to_virt(pages[i]), magic + i);
   143		}
   144	
   145		for (int i = 0; i < ARRAY_SIZE(argss); i++) {
   146			unsigned long base = mermap_cpu_base(raw_smp_processor_id());
   147			unsigned long end = mermap_cpu_end(raw_smp_processor_id());
   148			unsigned long addr;
   149	
   150			argss[i] = __mermap_get_wrapper(test, mm, pages[i], PAGE_SIZE, PAGE_KERNEL);
   151			KUNIT_ASSERT_NOT_NULL_MSG(test, argss[i], "alloc %d failed", i);
   152	
   153			addr = (unsigned long) mermap_addr(argss[i]->alloc);
   154			KUNIT_EXPECT_GE_MSG(test, addr, base, "alloc %d out of range", i);
   155			KUNIT_EXPECT_LT_MSG(test, addr, end, "alloc %d out of range", i);
 > 156		};
   157	
   158		/*
   159		 * Read through the mappings to try and detect if they point to the
   160		 * pages we wrote earlier.
   161		 */
   162		kthread_use_mm(mm);
   163		for (int i = 0; i < ARRAY_SIZE(pages) - 1; i++) {
   164			int *ptr  = (int *)mermap_addr(argss[i]->alloc);
   165	
   166			KUNIT_EXPECT_EQ(test, *ptr, magic + i);
   167		}
   168	
   169		/* Run out of alloc structures, only reserved allocs should succeed now. */
   170		KUNIT_ASSERT_NULL(test, __mermap_get(mm, pages[NR_NORMAL_ALLOCS],
   171						     PAGE_SIZE, PAGE_KERNEL, false));
   172		preempt_disable();
   173		reserved_alloc = __mermap_get(mm, pages[NR_NORMAL_ALLOCS],
   174					      PAGE_SIZE, PAGE_KERNEL, true);
   175		KUNIT_EXPECT_NOT_NULL(test, reserved_alloc);
   176		/* Also check if this mapping seems correct*/
   177		if (reserved_alloc) {
   178			int *ptr  = (int *)mermap_addr(reserved_alloc);
   179	
   180			KUNIT_EXPECT_EQ(test, *ptr, magic + NR_NORMAL_ALLOCS);
   181	
   182			mermap_put(reserved_alloc);
   183		}
   184		preempt_enable();
   185	
   186		kthread_unuse_mm(mm);
   187	}
   188	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki