[PATCH 3/3] kfence: allow changing number of objects via early parameter

There is a newer version of this series
Posted by yuan linyu 1 month, 3 weeks ago
Changing the KFENCE pool size currently requires rebuilding the
kernel, which is inconvenient.

Add an early boot parameter, kfence.num_objects, to allow changing
the number of KFENCE objects at boot time, so that the total pool
can be enlarged for a higher chance of catching bugs.

Signed-off-by: yuan linyu <yuanlinyu@honor.com>
---
 include/linux/kfence.h  |   5 +-
 mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
 mm/kfence/kfence.h      |   4 +-
 mm/kfence/kfence_test.c |   2 +-
 4 files changed, 96 insertions(+), 37 deletions(-)
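
A sizing note for reviewers (derived from the code below): the pool
consumes 2 * (num_objects + 1) pages, and the coverage hash table
sizeof(atomic_t) << (ilog2(num_objects) + 2) bytes - e.g. about 8 KiB
for 1023 objects with a 4-byte atomic_t.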

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 0ad1ddbb8b99..920bcd5649fa 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -24,7 +24,10 @@ extern unsigned long kfence_sample_interval;
  * address to metadata indices; effectively, the very first page serves as an
  * extended guard page, but otherwise has no special purpose.
  */
-#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern unsigned long __kfence_pool_size;
+#define KFENCE_POOL_SIZE (__kfence_pool_size)
+extern unsigned int __kfence_num_objects;
+#define KFENCE_NUM_OBJECTS (__kfence_num_objects)
 extern char *__kfence_pool;
 
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 24c6f1fa5b19..82425da5f27c 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -132,6 +132,31 @@ struct kfence_metadata *kfence_metadata __read_mostly;
  */
 static struct kfence_metadata *kfence_metadata_init __read_mostly;
 
+/* Allow changing the number of objects from the command line. */
+#define KFENCE_MIN_NUM_OBJECTS 1
+#define KFENCE_MAX_NUM_OBJECTS 65535
+unsigned int __kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS;
+EXPORT_SYMBOL(__kfence_num_objects); /* Export for test modules. */
+static unsigned int __kfence_pool_pages __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2;
+unsigned long __kfence_pool_size __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE;
+EXPORT_SYMBOL(__kfence_pool_size); /* Export for lkdtm module. */
+
+static int __init early_parse_kfence_num_objects(char *buf)
+{
+	unsigned int num;
+	int ret = kstrtouint(buf, 10, &num);
+
+	if (ret < 0)
+		return ret;
+
+	__kfence_num_objects = clamp(num, KFENCE_MIN_NUM_OBJECTS, KFENCE_MAX_NUM_OBJECTS);
+	__kfence_pool_pages = (__kfence_num_objects + 1) * 2;
+	__kfence_pool_size = __kfence_pool_pages * PAGE_SIZE;
+
+	return 0;
+}
+early_param("kfence.num_objects", early_parse_kfence_num_objects);
+
 /* Freelist with available objects. */
 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
@@ -155,12 +180,13 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
  *
  *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM
  */
+static unsigned int kfence_alloc_covered_order __read_mostly;
+static unsigned int kfence_alloc_covered_mask __read_mostly;
+static atomic_t *alloc_covered __read_mostly;
 #define ALLOC_COVERED_HNUM	2
-#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
-#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
-#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
-#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
-static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
+#define ALLOC_COVERED_HNEXT(h)	hash_32(h, kfence_alloc_covered_order)
+#define ALLOC_COVERED_MASK		(kfence_alloc_covered_mask)
+#define KFENCE_COVERED_SIZE		(sizeof(atomic_t) * (1 << kfence_alloc_covered_order))
 
 /* Stack depth used to determine uniqueness of an allocation. */
 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
@@ -200,7 +226,7 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
 
 static inline bool should_skip_covered(void)
 {
-	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
+	unsigned long thresh = (__kfence_num_objects * kfence_skip_covered_thresh) / 100;
 
 	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
 }
@@ -262,7 +288,7 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
 
 	/* Only call with a pointer into kfence_metadata. */
 	if (KFENCE_WARN_ON(meta < kfence_metadata ||
-			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
+			   meta >= kfence_metadata + __kfence_num_objects))
 		return 0;
 
 	/*
@@ -612,7 +638,7 @@ static unsigned long kfence_init_pool(void)
 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
 	 * enters __slab_free() slow-path.
 	 */
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+	for (i = 0; i < __kfence_pool_pages; i++) {
 		struct page *page;
 
 		if (!i || (i % 2))
@@ -640,7 +666,7 @@ static unsigned long kfence_init_pool(void)
 		addr += PAGE_SIZE;
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		struct kfence_metadata *meta = &kfence_metadata_init[i];
 
 		/* Initialize metadata. */
@@ -666,7 +692,7 @@ static unsigned long kfence_init_pool(void)
 	return 0;
 
 reset_slab:
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+	for (i = 0; i < __kfence_pool_pages; i++) {
 		struct page *page;
 
 		if (!i || (i % 2))
@@ -710,7 +736,7 @@ static bool __init kfence_init_pool_early(void)
 	 * fails for the first page, and therefore expect addr==__kfence_pool in
 	 * most failure cases.
 	 */
-	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+	memblock_free_late(__pa(addr), __kfence_pool_size - (addr - (unsigned long)__kfence_pool));
 	__kfence_pool = NULL;
 
 	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
@@ -740,7 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(stats);
  */
 static void *start_object(struct seq_file *seq, loff_t *pos)
 {
-	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
+	if (*pos < __kfence_num_objects)
 		return (void *)((long)*pos + 1);
 	return NULL;
 }
@@ -752,7 +778,7 @@ static void stop_object(struct seq_file *seq, void *v)
 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
+	if (*pos < __kfence_num_objects)
 		return (void *)((long)*pos + 1);
 	return NULL;
 }
@@ -796,7 +822,7 @@ static void kfence_check_all_canary(void)
 {
 	int i;
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
 
 		if (kfence_obj_allocated(meta))
@@ -891,7 +917,7 @@ void __init kfence_alloc_pool_and_metadata(void)
 	 * re-allocate the memory pool.
 	 */
 	if (!__kfence_pool)
-		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+		__kfence_pool = memblock_alloc(__kfence_pool_size, PAGE_SIZE);
 
 	if (!__kfence_pool) {
 		pr_err("failed to allocate pool\n");
@@ -900,11 +926,23 @@ void __init kfence_alloc_pool_and_metadata(void)
 
 	/* The memory allocated by memblock has been zeroed out. */
 	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
-	if (!kfence_metadata_init) {
-		pr_err("failed to allocate metadata\n");
-		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
-		__kfence_pool = NULL;
-	}
+	if (!kfence_metadata_init)
+		goto fail_metadata;
+
+	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
+	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
+	alloc_covered = memblock_alloc(KFENCE_COVERED_SIZE, PAGE_SIZE);
+	if (alloc_covered)
+		return;
+	pr_err("failed to allocate covered\n");
+	memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
+	kfence_metadata_init = NULL;
+	goto fail_pool;
+fail_metadata:
+	pr_err("failed to allocate metadata\n");
+fail_pool:
+	memblock_free(__kfence_pool, __kfence_pool_size);
+	__kfence_pool = NULL;
 }
 
 static void kfence_init_enable(void)
@@ -927,9 +965,9 @@ static void kfence_init_enable(void)
 	WRITE_ONCE(kfence_enabled, true);
 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
 
-	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
-		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
-		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
+	pr_info("initialized - using %lu bytes for %u objects at 0x%p-0x%p\n", __kfence_pool_size,
+		__kfence_num_objects, (void *)__kfence_pool,
+		(void *)(__kfence_pool + __kfence_pool_size));
 }
 
 void __init kfence_init(void)
@@ -950,41 +988,53 @@ void __init kfence_init(void)
 
 static int kfence_init_late(void)
 {
-	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
-	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
+	unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
 	unsigned long addr = (unsigned long)__kfence_pool;
-	unsigned long free_size = KFENCE_POOL_SIZE;
+	unsigned long free_size = __kfence_pool_size;
+	unsigned long nr_pages_covered, covered_size;
 	int err = -ENOMEM;
 
+	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
+	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
+	covered_size = PAGE_ALIGN(KFENCE_COVERED_SIZE);
+	nr_pages_covered = covered_size / PAGE_SIZE;
 #ifdef CONFIG_CONTIG_ALLOC
 	struct page *pages;
 
-	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
+	pages = alloc_contig_pages(__kfence_pool_pages, GFP_KERNEL, first_online_node,
 				   NULL);
 	if (!pages)
 		return -ENOMEM;
 
 	__kfence_pool = page_to_virt(pages);
+	pages = alloc_contig_pages(nr_pages_covered, GFP_KERNEL, first_online_node,
+				   NULL);
+	if (!pages)
+		goto free_pool;
+	alloc_covered = memset(page_to_virt(pages), 0, covered_size);
 	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
 				   NULL);
 	if (pages)
 		kfence_metadata_init = page_to_virt(pages);
 #else
-	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
+	if (__kfence_pool_pages > MAX_ORDER_NR_PAGES ||
 	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
 		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
 		return -EINVAL;
 	}
 
-	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+	__kfence_pool = alloc_pages_exact(__kfence_pool_size, GFP_KERNEL);
 	if (!__kfence_pool)
 		return -ENOMEM;
 
+	alloc_covered = alloc_pages_exact(covered_size, GFP_KERNEL | __GFP_ZERO);
+	if (!alloc_covered)
+		goto free_pool;
 	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
 #endif
 
 	if (!kfence_metadata_init)
-		goto free_pool;
+		goto free_cover;
 
 	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
 	addr = kfence_init_pool();
@@ -995,22 +1045,28 @@ static int kfence_init_late(void)
 	}
 
 	pr_err("%s failed\n", __func__);
-	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
+	free_size = __kfence_pool_size - (addr - (unsigned long)__kfence_pool);
 	err = -EBUSY;
 
 #ifdef CONFIG_CONTIG_ALLOC
 	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
 			  nr_pages_meta);
+free_cover:
+	free_contig_range(page_to_pfn(virt_to_page((void *)alloc_covered)),
+			  nr_pages_covered);
 free_pool:
 	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
 			  free_size / PAGE_SIZE);
 #else
 	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
+free_cover:
+	free_pages_exact((void *)alloc_covered, covered_size);
 free_pool:
 	free_pages_exact((void *)addr, free_size);
 #endif
 
 	kfence_metadata_init = NULL;
+	alloc_covered = NULL;
 	__kfence_pool = NULL;
 	return err;
 }
@@ -1036,7 +1092,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 	if (!smp_load_acquire(&kfence_metadata))
 		return;
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		bool in_use;
 
 		meta = &kfence_metadata[i];
@@ -1074,7 +1130,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 		}
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		meta = &kfence_metadata[i];
 
 		/* See above. */
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index dfba5ea06b01..dc3abb27c632 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -104,7 +104,7 @@ struct kfence_metadata {
 };
 
 #define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
-					CONFIG_KFENCE_NUM_OBJECTS)
+					__kfence_num_objects)
 
 extern struct kfence_metadata *kfence_metadata;
 
@@ -123,7 +123,7 @@ static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
 	 * error.
 	 */
 	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+	if (index < 0 || index >= __kfence_num_objects)
 		return NULL;
 
 	return &kfence_metadata[index];
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 00034e37bc9f..00a51aa4bad9 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -641,7 +641,7 @@ static void test_gfpzero(struct kunit *test)
 			break;
 		test_free(buf2);
 
-		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
+		if (kthread_should_stop() || (i == __kfence_num_objects)) {
 			kunit_warn(test, "giving up ... cannot get same object back\n");
 			return;
 		}
-- 
2.25.1
Re: [PATCH 3/3] kfence: allow changing number of objects via early parameter
Posted by Andrew Morton 1 month, 3 weeks ago
On Thu, 18 Dec 2025 09:58:49 +0800 yuan linyu <yuanlinyu@honor.com> wrote:

> Changing the KFENCE pool size currently requires rebuilding the
> kernel, which is inconvenient.
> 
> Add an early boot parameter, kfence.num_objects, to allow changing
> the number of KFENCE objects at boot time, so that the total pool
> can be enlarged for a higher chance of catching bugs.
> 
> ...
>
>  include/linux/kfence.h  |   5 +-
>  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
>  mm/kfence/kfence.h      |   4 +-
>  mm/kfence/kfence_test.c |   2 +-

Can you please add some documentation in Documentation/dev-tools/kfence.rst?

Also, this should be described in
Documentation/admin-guide/kernel-parameters.txt.  That file doesn't
mention kfence at all, which might be an oversight.
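
Something like this, perhaps (an untested sketch - the exact wording
and the clamp range should match the final patch):

    kfence.num_objects=
            [MM] Number of KFENCE pool objects. Overrides the
            compile-time CONFIG_KFENCE_NUM_OBJECTS default; the
            value is clamped to [1, 65535].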

Meanwhile, I'll queue these patches in mm.git's mm-nonmm-unstable
branch for some testing.  I'll await reviewer input before proceeding
further.  Thanks.
Re: [PATCH 3/3] kfence: allow changing number of objects via early parameter
Posted by Marco Elver 1 month, 3 weeks ago
On Fri, 19 Dec 2025 at 00:58, Andrew Morton <akpm@linux-foundation.org> wrote:
>
> On Thu, 18 Dec 2025 09:58:49 +0800 yuan linyu <yuanlinyu@honor.com> wrote:
>
> > Changing the KFENCE pool size currently requires rebuilding the
> > kernel, which is inconvenient.
> >
> > Add an early boot parameter, kfence.num_objects, to allow changing
> > the number of KFENCE objects at boot time, so that the total pool
> > can be enlarged for a higher chance of catching bugs.
> >
> > ...
> >
> >  include/linux/kfence.h  |   5 +-
> >  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
> >  mm/kfence/kfence.h      |   4 +-
> >  mm/kfence/kfence_test.c |   2 +-
>
> Can you please add some documentation in Documentation/dev-tools/kfence.rst?
>
> Also, this should be described in
> Documentation/admin-guide/kernel-parameters.txt.  That file doesn't
> mention kfence at all, which might be an oversight.
>
> Meanwhile, I'll queue these patches in mm.git's mm-nonmm-unstable
> branch for some testing.  I'll await reviewer input before proceeding
> further.  Thanks.

Note, there was a v2 sent 5 hours after this v1, which I commented
on here:
https://lore.kernel.org/all/aUPB18Xeh1BhF9GS@elver.google.com/

Thanks,
-- Marco
Re: [PATCH 3/3] kfence: allow changing number of objects via early parameter
Posted by Andrew Morton 1 month, 3 weeks ago
On Fri, 19 Dec 2025 01:03:11 +0100 Marco Elver <elver@google.com> wrote:

> > >  include/linux/kfence.h  |   5 +-
> > >  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
> > >  mm/kfence/kfence.h      |   4 +-
> > >  mm/kfence/kfence_test.c |   2 +-
> >
> > Can you please add some documentation in Documentation/dev-tools/kfence.rst?
> >
> > Also, this should be described in
> > Documentation/admin-guide/kernel-parameters.txt.  That file doesn't
> > mention kfence at all, which might be an oversight.
> >
> > Meanwhile, I'll queue these patches in mm.git's mm-nonmm-unstable
> > branch for some testing.  I'll await reviewer input before proceeding
> > further.  Thanks.
> 
> Note, there was an v2 sent 5 hours after this v1, which I had
> commented on here:
> https://lore.kernel.org/all/aUPB18Xeh1BhF9GS@elver.google.com/

Ah, OK, thanks, I confused myself.  I'll drop the v1 series and shall
await a v3!