[PATCH v2 2/2] kfence: allow change number of object by early parameter

yuan linyu posted 2 patches 1 month, 3 weeks ago
[PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by yuan linyu 1 month, 3 weeks ago
Changing the KFENCE pool size currently requires recompiling the kernel,
which is not convenient.

Add an early boot parameter, kfence.num_objects, that allows changing the
number of KFENCE objects and thus increasing the total pool size to improve
the chance of detecting errors.
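
For example (illustrative value), booting with:

    kfence.num_objects=1023

uses 1023 objects instead of CONFIG_KFENCE_NUM_OBJECTS, i.e. a pool of
(1023 + 1) * 2 = 2048 pages; values outside the range [1, 65535] are clamped.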

Signed-off-by: yuan linyu <yuanlinyu@honor.com>
---
 include/linux/kfence.h  |   5 +-
 mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
 mm/kfence/kfence.h      |   4 +-
 mm/kfence/kfence_test.c |   2 +-
 4 files changed, 96 insertions(+), 37 deletions(-)

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 0ad1ddbb8b99..920bcd5649fa 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -24,7 +24,10 @@ extern unsigned long kfence_sample_interval;
  * address to metadata indices; effectively, the very first page serves as an
  * extended guard page, but otherwise has no special purpose.
  */
-#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+extern unsigned int __kfence_pool_size;
+#define KFENCE_POOL_SIZE (__kfence_pool_size)
+extern unsigned int __kfence_num_objects;
+#define KFENCE_NUM_OBJECTS (__kfence_num_objects)
 extern char *__kfence_pool;
 
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 577a1699c553..5d5cea59c7b6 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -132,6 +132,31 @@ struct kfence_metadata *kfence_metadata __read_mostly;
  */
 static struct kfence_metadata *kfence_metadata_init __read_mostly;
 
+/* allow change number of objects from cmdline */
+#define KFENCE_MIN_NUM_OBJECTS 1
+#define KFENCE_MAX_NUM_OBJECTS 65535
+unsigned int __kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS;
+EXPORT_SYMBOL(__kfence_num_objects); /* Export for test modules. */
+static unsigned int __kfence_pool_pages __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2;
+unsigned int __kfence_pool_size __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE;
+EXPORT_SYMBOL(__kfence_pool_size); /* Export for lkdtm module. */
+
+static int __init early_parse_kfence_num_objects(char *buf)
+{
+	unsigned int num;
+	int ret = kstrtouint(buf, 10, &num);
+
+	if (ret < 0)
+		return ret;
+
+	__kfence_num_objects = clamp(num, KFENCE_MIN_NUM_OBJECTS, KFENCE_MAX_NUM_OBJECTS);
+	__kfence_pool_pages = (__kfence_num_objects + 1) * 2;
+	__kfence_pool_size = __kfence_pool_pages * PAGE_SIZE;
+
+	return 0;
+}
+early_param("kfence.num_objects", early_parse_kfence_num_objects);
+
 /* Freelist with available objects. */
 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
@@ -155,12 +180,13 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
  *
  *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM
  */
+static unsigned int kfence_alloc_covered_order __read_mostly;
+static unsigned int kfence_alloc_covered_mask __read_mostly;
+static atomic_t *alloc_covered __read_mostly;
 #define ALLOC_COVERED_HNUM	2
-#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
-#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
-#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
-#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
-static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
+#define ALLOC_COVERED_HNEXT(h)	hash_32(h, kfence_alloc_covered_order)
+#define ALLOC_COVERED_MASK		(kfence_alloc_covered_mask)
+#define KFENCE_COVERED_SIZE		(sizeof(atomic_t) * (1 << kfence_alloc_covered_order))
 
 /* Stack depth used to determine uniqueness of an allocation. */
 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
@@ -200,7 +226,7 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
 
 static inline bool should_skip_covered(void)
 {
-	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
+	unsigned long thresh = (__kfence_num_objects * kfence_skip_covered_thresh) / 100;
 
 	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
 }
@@ -262,7 +288,7 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
 
 	/* Only call with a pointer into kfence_metadata. */
 	if (KFENCE_WARN_ON(meta < kfence_metadata ||
-			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
+			   meta >= kfence_metadata + __kfence_num_objects))
 		return 0;
 
 	/*
@@ -612,7 +638,7 @@ static unsigned long kfence_init_pool(void)
 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
 	 * enters __slab_free() slow-path.
 	 */
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+	for (i = 0; i < __kfence_pool_pages; i++) {
 		struct page *page;
 
 		if (!i || (i % 2))
@@ -640,7 +666,7 @@ static unsigned long kfence_init_pool(void)
 		addr += PAGE_SIZE;
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		struct kfence_metadata *meta = &kfence_metadata_init[i];
 
 		/* Initialize metadata. */
@@ -666,7 +692,7 @@ static unsigned long kfence_init_pool(void)
 	return 0;
 
 reset_slab:
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+	for (i = 0; i < __kfence_pool_pages; i++) {
 		struct page *page;
 
 		if (!i || (i % 2))
@@ -710,7 +736,7 @@ static bool __init kfence_init_pool_early(void)
 	 * fails for the first page, and therefore expect addr==__kfence_pool in
 	 * most failure cases.
 	 */
-	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+	memblock_free_late(__pa(addr), __kfence_pool_size - (addr - (unsigned long)__kfence_pool));
 	__kfence_pool = NULL;
 
 	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
@@ -740,7 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(stats);
  */
 static void *start_object(struct seq_file *seq, loff_t *pos)
 {
-	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
+	if (*pos < __kfence_num_objects)
 		return (void *)((long)*pos + 1);
 	return NULL;
 }
@@ -752,7 +778,7 @@ static void stop_object(struct seq_file *seq, void *v)
 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
+	if (*pos < __kfence_num_objects)
 		return (void *)((long)*pos + 1);
 	return NULL;
 }
@@ -799,7 +825,7 @@ static void kfence_check_all_canary(void)
 {
 	int i;
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
 
 		if (kfence_obj_allocated(meta))
@@ -894,7 +920,7 @@ void __init kfence_alloc_pool_and_metadata(void)
 	 * re-allocate the memory pool.
 	 */
 	if (!__kfence_pool)
-		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+		__kfence_pool = memblock_alloc(__kfence_pool_size, PAGE_SIZE);
 
 	if (!__kfence_pool) {
 		pr_err("failed to allocate pool\n");
@@ -903,11 +929,23 @@ void __init kfence_alloc_pool_and_metadata(void)
 
 	/* The memory allocated by memblock has been zeroed out. */
 	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
-	if (!kfence_metadata_init) {
-		pr_err("failed to allocate metadata\n");
-		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
-		__kfence_pool = NULL;
-	}
+	if (!kfence_metadata_init)
+		goto fail_pool;
+
+	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
+	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
+	alloc_covered = memblock_alloc(KFENCE_COVERED_SIZE, PAGE_SIZE);
+	if (alloc_covered)
+		return;
+
+	pr_err("failed to allocate covered\n");
+	memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
+	kfence_metadata_init = NULL;
+
+fail_pool:
+	pr_err("failed to allocate metadata\n");
+	memblock_free(__kfence_pool, __kfence_pool_size);
+	__kfence_pool = NULL;
 }
 
 static void kfence_init_enable(void)
@@ -930,9 +968,9 @@ static void kfence_init_enable(void)
 	WRITE_ONCE(kfence_enabled, true);
 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
 
-	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
-		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
-		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
+	pr_info("initialized - using %u bytes for %d objects at 0x%p-0x%p\n", __kfence_pool_size,
+		__kfence_num_objects, (void *)__kfence_pool,
+		(void *)(__kfence_pool + __kfence_pool_size));
 }
 
 void __init kfence_init(void)
@@ -953,41 +991,53 @@ void __init kfence_init(void)
 
 static int kfence_init_late(void)
 {
-	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
-	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
+	unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
 	unsigned long addr = (unsigned long)__kfence_pool;
-	unsigned long free_size = KFENCE_POOL_SIZE;
+	unsigned long free_size = __kfence_pool_size;
+	unsigned long nr_pages_covered, covered_size;
 	int err = -ENOMEM;
 
+	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
+	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
+	covered_size =  PAGE_ALIGN(KFENCE_COVERED_SIZE);
+	nr_pages_covered = (covered_size / PAGE_SIZE);
 #ifdef CONFIG_CONTIG_ALLOC
 	struct page *pages;
 
-	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
+	pages = alloc_contig_pages(__kfence_pool_pages, GFP_KERNEL, first_online_node,
 				   NULL);
 	if (!pages)
 		return -ENOMEM;
 
 	__kfence_pool = page_to_virt(pages);
+	pages = alloc_contig_pages(nr_pages_covered, GFP_KERNEL, first_online_node,
+				   NULL);
+	if (!pages)
+		goto free_pool;
+	alloc_covered = page_to_virt(pages);
 	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
 				   NULL);
 	if (pages)
 		kfence_metadata_init = page_to_virt(pages);
 #else
-	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
+	if (__kfence_pool_pages > MAX_ORDER_NR_PAGES ||
 	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
 		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
 		return -EINVAL;
 	}
 
-	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+	__kfence_pool = alloc_pages_exact(__kfence_pool_size, GFP_KERNEL);
 	if (!__kfence_pool)
 		return -ENOMEM;
 
+	alloc_covered = alloc_pages_exact(covered_size, GFP_KERNEL);
+	if (!alloc_covered)
+		goto free_pool;
 	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
 #endif
 
 	if (!kfence_metadata_init)
-		goto free_pool;
+		goto free_cover;
 
 	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
 	addr = kfence_init_pool();
@@ -998,22 +1048,28 @@ static int kfence_init_late(void)
 	}
 
 	pr_err("%s failed\n", __func__);
-	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
+	free_size = __kfence_pool_size - (addr - (unsigned long)__kfence_pool);
 	err = -EBUSY;
 
 #ifdef CONFIG_CONTIG_ALLOC
 	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
 			  nr_pages_meta);
+free_cover:
+	free_contig_range(page_to_pfn(virt_to_page((void *)alloc_covered)),
+			  nr_pages_covered);
 free_pool:
 	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
 			  free_size / PAGE_SIZE);
 #else
 	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
+free_cover:
+	free_pages_exact((void *)alloc_covered, covered_size);
 free_pool:
 	free_pages_exact((void *)addr, free_size);
 #endif
 
 	kfence_metadata_init = NULL;
+	alloc_covered = NULL;
 	__kfence_pool = NULL;
 	return err;
 }
@@ -1039,7 +1095,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 	if (!smp_load_acquire(&kfence_metadata))
 		return;
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		bool in_use;
 
 		meta = &kfence_metadata[i];
@@ -1077,7 +1133,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 		}
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < __kfence_num_objects; i++) {
 		meta = &kfence_metadata[i];
 
 		/* See above. */
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index dfba5ea06b01..dc3abb27c632 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -104,7 +104,7 @@ struct kfence_metadata {
 };
 
 #define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
-					CONFIG_KFENCE_NUM_OBJECTS)
+					__kfence_num_objects)
 
 extern struct kfence_metadata *kfence_metadata;
 
@@ -123,7 +123,7 @@ static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
 	 * error.
 	 */
 	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+	if (index < 0 || index >= __kfence_num_objects)
 		return NULL;
 
 	return &kfence_metadata[index];
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 00034e37bc9f..00a51aa4bad9 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -641,7 +641,7 @@ static void test_gfpzero(struct kunit *test)
 			break;
 		test_free(buf2);
 
-		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
+		if (kthread_should_stop() || (i == __kfence_num_objects)) {
 			kunit_warn(test, "giving up ... cannot get same object back\n");
 			return;
 		}
-- 
2.25.1
Re: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by kernel test robot 1 month, 2 weeks ago
Hi yuan,

kernel test robot noticed the following build warnings:

[auto build test WARNING on akpm-mm/mm-everything]
[also build test WARNING on drm-misc/drm-misc-next linus/master v6.19-rc1 next-20251219]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/yuan-linyu/LoongArch-kfence-avoid-use-CONFIG_KFENCE_NUM_OBJECTS/20251218-144322
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20251218063916.1433615-3-yuanlinyu%40honor.com
patch subject: [PATCH v2 2/2] kfence: allow change number of object by early parameter
config: i386-buildonly-randconfig-001-20251219 (https://download.01.org/0day-ci/archive/20251220/202512202213.aA8qY41g-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20251220/202512202213.aA8qY41g-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202512202213.aA8qY41g-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/kfence/core.c:997:16: warning: variable 'nr_pages_covered' set but not used [-Wunused-but-set-variable]
     997 |         unsigned long nr_pages_covered, covered_size;
         |                       ^
   1 warning generated.


vim +/nr_pages_covered +997 mm/kfence/core.c

   991	
   992	static int kfence_init_late(void)
   993	{
   994		unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
   995		unsigned long addr = (unsigned long)__kfence_pool;
   996		unsigned long free_size = __kfence_pool_size;
 > 997		unsigned long nr_pages_covered, covered_size;
   998		int err = -ENOMEM;
   999	
  1000		kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
  1001		kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
  1002		covered_size =  PAGE_ALIGN(KFENCE_COVERED_SIZE);
  1003		nr_pages_covered = (covered_size / PAGE_SIZE);
  1004	#ifdef CONFIG_CONTIG_ALLOC
  1005		struct page *pages;
  1006	
  1007		pages = alloc_contig_pages(__kfence_pool_pages, GFP_KERNEL, first_online_node,
  1008					   NULL);
  1009		if (!pages)
  1010			return -ENOMEM;
  1011	
  1012		__kfence_pool = page_to_virt(pages);
  1013		pages = alloc_contig_pages(nr_pages_covered, GFP_KERNEL, first_online_node,
  1014					   NULL);
  1015		if (!pages)
  1016			goto free_pool;
  1017		alloc_covered = page_to_virt(pages);
  1018		pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
  1019					   NULL);
  1020		if (pages)
  1021			kfence_metadata_init = page_to_virt(pages);
  1022	#else
  1023		if (__kfence_pool_pages > MAX_ORDER_NR_PAGES ||
  1024		    nr_pages_meta > MAX_ORDER_NR_PAGES) {
  1025			pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
  1026			return -EINVAL;
  1027		}
  1028	
  1029		__kfence_pool = alloc_pages_exact(__kfence_pool_size, GFP_KERNEL);
  1030		if (!__kfence_pool)
  1031			return -ENOMEM;
  1032	
  1033		alloc_covered = alloc_pages_exact(covered_size, GFP_KERNEL);
  1034		if (!alloc_covered)
  1035			goto free_pool;
  1036		kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
  1037	#endif
  1038	
  1039		if (!kfence_metadata_init)
  1040			goto free_cover;
  1041	
  1042		memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
  1043		addr = kfence_init_pool();
  1044		if (!addr) {
  1045			kfence_init_enable();
  1046			kfence_debugfs_init();
  1047			return 0;
  1048		}
  1049	
  1050		pr_err("%s failed\n", __func__);
  1051		free_size = __kfence_pool_size - (addr - (unsigned long)__kfence_pool);
  1052		err = -EBUSY;
  1053	

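One possible way to avoid this warning (an untested sketch against the posted
patch, not a confirmed fix) is to confine nr_pages_covered to the only
configuration that uses it:

	unsigned long covered_size;
#ifdef CONFIG_CONTIG_ALLOC
	unsigned long nr_pages_covered;
#endif
	/* ... */
	covered_size = PAGE_ALIGN(KFENCE_COVERED_SIZE);
#ifdef CONFIG_CONTIG_ALLOC
	/* Only the contig-alloc paths allocate and free by page count. */
	nr_pages_covered = covered_size / PAGE_SIZE;
#endif
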
-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Re: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by Marco Elver 1 month, 3 weeks ago
On Thu, Dec 18, 2025 at 02:39PM +0800, yuan linyu wrote:
> Changing the KFENCE pool size currently requires recompiling the kernel,
> which is not convenient.
> 
> Add an early boot parameter, kfence.num_objects, that allows changing the
> number of KFENCE objects and thus increasing the total pool size to improve
> the chance of detecting errors.
> 
> Signed-off-by: yuan linyu <yuanlinyu@honor.com>
> ---
>  include/linux/kfence.h  |   5 +-
>  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
>  mm/kfence/kfence.h      |   4 +-
>  mm/kfence/kfence_test.c |   2 +-
>  4 files changed, 96 insertions(+), 37 deletions(-)
> 
> diff --git a/include/linux/kfence.h b/include/linux/kfence.h
> index 0ad1ddbb8b99..920bcd5649fa 100644
> --- a/include/linux/kfence.h
> +++ b/include/linux/kfence.h
> @@ -24,7 +24,10 @@ extern unsigned long kfence_sample_interval;
>   * address to metadata indices; effectively, the very first page serves as an
>   * extended guard page, but otherwise has no special purpose.
>   */
> -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
> +extern unsigned int __kfence_pool_size;
> +#define KFENCE_POOL_SIZE (__kfence_pool_size)
> +extern unsigned int __kfence_num_objects;
> +#define KFENCE_NUM_OBJECTS (__kfence_num_objects)
>  extern char *__kfence_pool;
>  

You have ignored the comment below in this file:

	/**
	 * is_kfence_address() - check if an address belongs to KFENCE pool
	 * @addr: address to check
	 *
	[...]
	 * Note: This function may be used in fast-paths, and is performance critical.
	 * Future changes should take this into account; for instance, we want to avoid
   >>	 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
   >>	 * constant (until immediate patching support is added to the kernel).
	 */
	static __always_inline bool is_kfence_address(const void *addr)
	{
		/*
		 * The __kfence_pool != NULL check is required to deal with the case
		 * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
		 * the slow-path after the range-check!
		 */
		return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
	}

While I think the change itself would be useful to have eventually, a
better design might be needed. It's unclear to me what the perf impact
is these days (a lot has changed since that comment was written). Could
you run some benchmarks to analyze if the fast path is affected by the
additional load (please do this for whichever arch you care about, but
also arm64 and x86)?

If performance is affected, all this could be guarded behind another
Kconfig option, but it's not great either.
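
For illustration only, such a guard might look like the sketch below (the
option name CONFIG_KFENCE_DYNAMIC_OBJECTS is made up here, not an existing
Kconfig symbol):

#ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS
/* Boot-time override enabled: the pool size becomes a variable (extra load). */
extern unsigned int __kfence_pool_size;
#define KFENCE_POOL_SIZE (__kfence_pool_size)
#else
/* Default: keep the compile-time constant in the fast path. */
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
#endif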

>  DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
> diff --git a/mm/kfence/core.c b/mm/kfence/core.c
> index 577a1699c553..5d5cea59c7b6 100644
> --- a/mm/kfence/core.c
> +++ b/mm/kfence/core.c
> @@ -132,6 +132,31 @@ struct kfence_metadata *kfence_metadata __read_mostly;
>   */
>  static struct kfence_metadata *kfence_metadata_init __read_mostly;
>  
> +/* allow change number of objects from cmdline */
> +#define KFENCE_MIN_NUM_OBJECTS 1
> +#define KFENCE_MAX_NUM_OBJECTS 65535
> +unsigned int __kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS;
> +EXPORT_SYMBOL(__kfence_num_objects); /* Export for test modules. */
> +static unsigned int __kfence_pool_pages __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2;
> +unsigned int __kfence_pool_size __read_mostly = (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE;
> +EXPORT_SYMBOL(__kfence_pool_size); /* Export for lkdtm module. */
> +
> +static int __init early_parse_kfence_num_objects(char *buf)
> +{
> +	unsigned int num;
> +	int ret = kstrtouint(buf, 10, &num);
> +
> +	if (ret < 0)
> +		return ret;
> +
> +	__kfence_num_objects = clamp(num, KFENCE_MIN_NUM_OBJECTS, KFENCE_MAX_NUM_OBJECTS);
> +	__kfence_pool_pages = (__kfence_num_objects + 1) * 2;
> +	__kfence_pool_size = __kfence_pool_pages * PAGE_SIZE;
> +
> +	return 0;
> +}
> +early_param("kfence.num_objects", early_parse_kfence_num_objects);
> +
>  /* Freelist with available objects. */
>  static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
>  static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
> @@ -155,12 +180,13 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
>   *
>   *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM
>   */
> +static unsigned int kfence_alloc_covered_order __read_mostly;
> +static unsigned int kfence_alloc_covered_mask __read_mostly;
> +static atomic_t *alloc_covered __read_mostly;
>  #define ALLOC_COVERED_HNUM	2
> -#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
> -#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
> -#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
> -#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
> -static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
> +#define ALLOC_COVERED_HNEXT(h)	hash_32(h, kfence_alloc_covered_order)
> +#define ALLOC_COVERED_MASK		(kfence_alloc_covered_mask)
> +#define KFENCE_COVERED_SIZE		(sizeof(atomic_t) * (1 << kfence_alloc_covered_order))
>  
>  /* Stack depth used to determine uniqueness of an allocation. */
>  #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
> @@ -200,7 +226,7 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
>  
>  static inline bool should_skip_covered(void)
>  {
> -	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
> +	unsigned long thresh = (__kfence_num_objects * kfence_skip_covered_thresh) / 100;
>  
>  	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
>  }
> @@ -262,7 +288,7 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m
>  
>  	/* Only call with a pointer into kfence_metadata. */
>  	if (KFENCE_WARN_ON(meta < kfence_metadata ||
> -			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
> +			   meta >= kfence_metadata + __kfence_num_objects))
>  		return 0;
>  
>  	/*
> @@ -612,7 +638,7 @@ static unsigned long kfence_init_pool(void)
>  	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
>  	 * enters __slab_free() slow-path.
>  	 */
> -	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> +	for (i = 0; i < __kfence_pool_pages; i++) {
>  		struct page *page;
>  
>  		if (!i || (i % 2))
> @@ -640,7 +666,7 @@ static unsigned long kfence_init_pool(void)
>  		addr += PAGE_SIZE;
>  	}
>  
> -	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +	for (i = 0; i < __kfence_num_objects; i++) {
>  		struct kfence_metadata *meta = &kfence_metadata_init[i];
>  
>  		/* Initialize metadata. */
> @@ -666,7 +692,7 @@ static unsigned long kfence_init_pool(void)
>  	return 0;
>  
>  reset_slab:
> -	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
> +	for (i = 0; i < __kfence_pool_pages; i++) {
>  		struct page *page;
>  
>  		if (!i || (i % 2))
> @@ -710,7 +736,7 @@ static bool __init kfence_init_pool_early(void)
>  	 * fails for the first page, and therefore expect addr==__kfence_pool in
>  	 * most failure cases.
>  	 */
> -	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
> +	memblock_free_late(__pa(addr), __kfence_pool_size - (addr - (unsigned long)__kfence_pool));
>  	__kfence_pool = NULL;
>  
>  	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
> @@ -740,7 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(stats);
>   */
>  static void *start_object(struct seq_file *seq, loff_t *pos)
>  {
> -	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
> +	if (*pos < __kfence_num_objects)
>  		return (void *)((long)*pos + 1);
>  	return NULL;
>  }
> @@ -752,7 +778,7 @@ static void stop_object(struct seq_file *seq, void *v)
>  static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
>  {
>  	++*pos;
> -	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
> +	if (*pos < __kfence_num_objects)
>  		return (void *)((long)*pos + 1);
>  	return NULL;
>  }
> @@ -799,7 +825,7 @@ static void kfence_check_all_canary(void)
>  {
>  	int i;
>  
> -	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +	for (i = 0; i < __kfence_num_objects; i++) {
>  		struct kfence_metadata *meta = &kfence_metadata[i];
>  
>  		if (kfence_obj_allocated(meta))
> @@ -894,7 +920,7 @@ void __init kfence_alloc_pool_and_metadata(void)
>  	 * re-allocate the memory pool.
>  	 */
>  	if (!__kfence_pool)
> -		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
> +		__kfence_pool = memblock_alloc(__kfence_pool_size, PAGE_SIZE);
>  
>  	if (!__kfence_pool) {
>  		pr_err("failed to allocate pool\n");
> @@ -903,11 +929,23 @@ void __init kfence_alloc_pool_and_metadata(void)
>  
>  	/* The memory allocated by memblock has been zeroed out. */
>  	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
> -	if (!kfence_metadata_init) {
> -		pr_err("failed to allocate metadata\n");
> -		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
> -		__kfence_pool = NULL;
> -	}
> +	if (!kfence_metadata_init)
> +		goto fail_pool;
> +
> +	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
> +	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
> +	alloc_covered = memblock_alloc(KFENCE_COVERED_SIZE, PAGE_SIZE);
> +	if (alloc_covered)
> +		return;
> +
> +	pr_err("failed to allocate covered\n");
> +	memblock_free(kfence_metadata_init, KFENCE_METADATA_SIZE);
> +	kfence_metadata_init = NULL;
> +
> +fail_pool:
> +	pr_err("failed to allocate metadata\n");
> +	memblock_free(__kfence_pool, __kfence_pool_size);
> +	__kfence_pool = NULL;
>  }
>  
>  static void kfence_init_enable(void)
> @@ -930,9 +968,9 @@ static void kfence_init_enable(void)
>  	WRITE_ONCE(kfence_enabled, true);
>  	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
>  
> -	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
> -		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
> -		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
> +	pr_info("initialized - using %u bytes for %d objects at 0x%p-0x%p\n", __kfence_pool_size,
> +		__kfence_num_objects, (void *)__kfence_pool,
> +		(void *)(__kfence_pool + __kfence_pool_size));
>  }
>  
>  void __init kfence_init(void)
> @@ -953,41 +991,53 @@ void __init kfence_init(void)
>  
>  static int kfence_init_late(void)
>  {
> -	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
> -	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
> +	unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
>  	unsigned long addr = (unsigned long)__kfence_pool;
> -	unsigned long free_size = KFENCE_POOL_SIZE;
> +	unsigned long free_size = __kfence_pool_size;
> +	unsigned long nr_pages_covered, covered_size;
>  	int err = -ENOMEM;
>  
> +	kfence_alloc_covered_order = ilog2(__kfence_num_objects) + 2;
> +	kfence_alloc_covered_mask = (1 << kfence_alloc_covered_order) - 1;
> +	covered_size =  PAGE_ALIGN(KFENCE_COVERED_SIZE);
> +	nr_pages_covered = (covered_size / PAGE_SIZE);
>  #ifdef CONFIG_CONTIG_ALLOC
>  	struct page *pages;
>  
> -	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
> +	pages = alloc_contig_pages(__kfence_pool_pages, GFP_KERNEL, first_online_node,
>  				   NULL);
>  	if (!pages)
>  		return -ENOMEM;
>  
>  	__kfence_pool = page_to_virt(pages);
> +	pages = alloc_contig_pages(nr_pages_covered, GFP_KERNEL, first_online_node,
> +				   NULL);
> +	if (!pages)
> +		goto free_pool;
> +	alloc_covered = page_to_virt(pages);
>  	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
>  				   NULL);
>  	if (pages)
>  		kfence_metadata_init = page_to_virt(pages);
>  #else
> -	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
> +	if (__kfence_pool_pages > MAX_ORDER_NR_PAGES ||
>  	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
>  		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
>  		return -EINVAL;
>  	}
>  
> -	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
> +	__kfence_pool = alloc_pages_exact(__kfence_pool_size, GFP_KERNEL);
>  	if (!__kfence_pool)
>  		return -ENOMEM;
>  
> +	alloc_covered = alloc_pages_exact(covered_size, GFP_KERNEL);
> +	if (!alloc_covered)
> +		goto free_pool;
>  	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
>  #endif
>  
>  	if (!kfence_metadata_init)
> -		goto free_pool;
> +		goto free_cover;
>  
>  	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
>  	addr = kfence_init_pool();
> @@ -998,22 +1048,28 @@ static int kfence_init_late(void)
>  	}
>  
>  	pr_err("%s failed\n", __func__);
> -	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
> +	free_size = __kfence_pool_size - (addr - (unsigned long)__kfence_pool);
>  	err = -EBUSY;
>  
>  #ifdef CONFIG_CONTIG_ALLOC
>  	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
>  			  nr_pages_meta);
> +free_cover:
> +	free_contig_range(page_to_pfn(virt_to_page((void *)alloc_covered)),
> +			  nr_pages_covered);
>  free_pool:
>  	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
>  			  free_size / PAGE_SIZE);
>  #else
>  	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
> +free_cover:
> +	free_pages_exact((void *)alloc_covered, covered_size);
>  free_pool:
>  	free_pages_exact((void *)addr, free_size);
>  #endif
>  
>  	kfence_metadata_init = NULL;
> +	alloc_covered = NULL;
>  	__kfence_pool = NULL;
>  	return err;
>  }
> @@ -1039,7 +1095,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
>  	if (!smp_load_acquire(&kfence_metadata))
>  		return;
>  
> -	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +	for (i = 0; i < __kfence_num_objects; i++) {
>  		bool in_use;
>  
>  		meta = &kfence_metadata[i];
> @@ -1077,7 +1133,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
>  		}
>  	}
>  
> -	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
> +	for (i = 0; i < __kfence_num_objects; i++) {
>  		meta = &kfence_metadata[i];
>  
>  		/* See above. */
> diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
> index dfba5ea06b01..dc3abb27c632 100644
> --- a/mm/kfence/kfence.h
> +++ b/mm/kfence/kfence.h
> @@ -104,7 +104,7 @@ struct kfence_metadata {
>  };
>  
>  #define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
> -					CONFIG_KFENCE_NUM_OBJECTS)
> +					__kfence_num_objects)
>  
>  extern struct kfence_metadata *kfence_metadata;
>  
> @@ -123,7 +123,7 @@ static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
>  	 * error.
>  	 */
>  	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
> -	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
> +	if (index < 0 || index >= __kfence_num_objects)
>  		return NULL;
>  
>  	return &kfence_metadata[index];
> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
> index 00034e37bc9f..00a51aa4bad9 100644
> --- a/mm/kfence/kfence_test.c
> +++ b/mm/kfence/kfence_test.c
> @@ -641,7 +641,7 @@ static void test_gfpzero(struct kunit *test)
>  			break;
>  		test_free(buf2);
>  
> -		if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) {
> +		if (kthread_should_stop() || (i == __kfence_num_objects)) {
>  			kunit_warn(test, "giving up ... cannot get same object back\n");
>  			return;
>  		}
> -- 
> 2.25.1
RE: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by yuanlinyu 1 month, 3 weeks ago
> From: Marco Elver <elver@google.com>
> Sent: Thursday, December 18, 2025 4:57 PM
> To: yuanlinyu <yuanlinyu@honor.com>
> Cc: Alexander Potapenko <glider@google.com>; Dmitry Vyukov
> <dvyukov@google.com>; Andrew Morton <akpm@linux-foundation.org>;
> Huacai Chen <chenhuacai@kernel.org>; WANG Xuerui <kernel@xen0n.name>;
> kasan-dev@googlegroups.com; linux-mm@kvack.org; loongarch@lists.linux.dev;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 2/2] kfence: allow change number of object by early
> parameter
> 
> On Thu, Dec 18, 2025 at 02:39PM +0800, yuan linyu wrote:
> > Changing the KFENCE pool size currently requires recompiling the kernel,
> > which is not convenient.
> >
> > Add an early boot parameter, kfence.num_objects, that allows changing the
> > number of KFENCE objects and thus increasing the total pool size to improve
> > the chance of detecting errors.
> >
> > Signed-off-by: yuan linyu <yuanlinyu@honor.com>
> > ---
> >  include/linux/kfence.h  |   5 +-
> >  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
> >  mm/kfence/kfence.h      |   4 +-
> >  mm/kfence/kfence_test.c |   2 +-
> >  4 files changed, 96 insertions(+), 37 deletions(-)
> >
> > diff --git a/include/linux/kfence.h b/include/linux/kfence.h
> > index 0ad1ddbb8b99..920bcd5649fa 100644
> > --- a/include/linux/kfence.h
> > +++ b/include/linux/kfence.h
> > @@ -24,7 +24,10 @@ extern unsigned long kfence_sample_interval;
> >   * address to metadata indices; effectively, the very first page serves as an
> >   * extended guard page, but otherwise has no special purpose.
> >   */
> > -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
> > +extern unsigned int __kfence_pool_size;
> > +#define KFENCE_POOL_SIZE (__kfence_pool_size)
> > +extern unsigned int __kfence_num_objects;
> > +#define KFENCE_NUM_OBJECTS (__kfence_num_objects)
> >  extern char *__kfence_pool;
> >
> 
> You have ignored the comment below in this file:
> 
> 	/**
> 	 * is_kfence_address() - check if an address belongs to KFENCE pool
> 	 * @addr: address to check
> 	 *
> 	[...]
> 	 * Note: This function may be used in fast-paths, and is performance critical.
> 	 * Future changes should take this into account; for instance, we want to avoid
>    >>	 * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
>    >>	 * constant (until immediate patching support is added to the kernel).
> 	 */
> 	static __always_inline bool is_kfence_address(const void *addr)
> 	{
> 		/*
> 		 * The __kfence_pool != NULL check is required to deal with the case
> 		 * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
> 		 * the slow-path after the range-check!
> 		 */
> 		return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
> 	}

Do you mean it is performance critical because of accessing global data?
It already accesses the __kfence_pool global.
Is adding one more global acceptable here?

Other places access global data as well, don't they?


I don't know whether Linux distributions like Ubuntu enable KFENCE or not.
I only know it is turned on by default on Android devices.


> 
> While I think the change itself would be useful to have eventually, a
> better design might be needed. It's unclear to me what the perf impact

Could you share the better design idea?

> is these days (a lot has changed since that comment was written). Could
> you run some benchmarks to analyze if the fast path is affected by the
> additional load (please do this for whichever arch you care about, but
> also arm64 and x86)?
> 
> If performance is affected, all this could be guarded behind another
> Kconfig option, but it's not great either.

What kind of option?
There is already a Kconfig option to define the number of objects; this just
provides a boot parameter for the same setting that the user can change.

> 
> > --
> > 2.25.1
Re: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by Marco Elver 1 month, 3 weeks ago
On Thu, 18 Dec 2025 at 11:18, yuanlinyu <yuanlinyu@honor.com> wrote:
>
> > From: Marco Elver <elver@google.com>
> > Sent: Thursday, December 18, 2025 4:57 PM
> > To: yuanlinyu <yuanlinyu@honor.com>
> > Cc: Alexander Potapenko <glider@google.com>; Dmitry Vyukov
> > <dvyukov@google.com>; Andrew Morton <akpm@linux-foundation.org>;
> > Huacai Chen <chenhuacai@kernel.org>; WANG Xuerui <kernel@xen0n.name>;
> > kasan-dev@googlegroups.com; linux-mm@kvack.org; loongarch@lists.linux.dev;
> > linux-kernel@vger.kernel.org
> > Subject: Re: [PATCH v2 2/2] kfence: allow change number of object by early
> > parameter
> >
> > On Thu, Dec 18, 2025 at 02:39PM +0800, yuan linyu wrote:
> > > Changing the KFENCE pool size currently requires recompiling the kernel,
> > > which is not convenient.
> > >
> > > Add an early boot parameter, kfence.num_objects, that allows changing the
> > > number of KFENCE objects and thus increasing the total pool size to improve
> > > the chance of detecting errors.
> > >
> > > Signed-off-by: yuan linyu <yuanlinyu@honor.com>
> > > ---
> > >  include/linux/kfence.h  |   5 +-
> > >  mm/kfence/core.c        | 122 +++++++++++++++++++++++++++++-----------
> > >  mm/kfence/kfence.h      |   4 +-
> > >  mm/kfence/kfence_test.c |   2 +-
> > >  4 files changed, 96 insertions(+), 37 deletions(-)
> > >
> > > diff --git a/include/linux/kfence.h b/include/linux/kfence.h
> > > index 0ad1ddbb8b99..920bcd5649fa 100644
> > > --- a/include/linux/kfence.h
> > > +++ b/include/linux/kfence.h
> > > @@ -24,7 +24,10 @@ extern unsigned long kfence_sample_interval;
> > >   * address to metadata indices; effectively, the very first page serves as an
> > >   * extended guard page, but otherwise has no special purpose.
> > >   */
> > > -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
> > > +extern unsigned int __kfence_pool_size;
> > > +#define KFENCE_POOL_SIZE (__kfence_pool_size)
> > > +extern unsigned int __kfence_num_objects;
> > > +#define KFENCE_NUM_OBJECTS (__kfence_num_objects)
> > >  extern char *__kfence_pool;
> > >
> >
> > You have ignored the comment below in this file:
> >
> >       /**
> >        * is_kfence_address() - check if an address belongs to KFENCE pool
> >        * @addr: address to check
> >        *
> >       [...]
> >        * Note: This function may be used in fast-paths, and is performance critical.
> >        * Future changes should take this into account; for instance, we want to avoid
> >    >>  * introducing another load and therefore need to keep KFENCE_POOL_SIZE a
> >    >>  * constant (until immediate patching support is added to the kernel).
> >        */
> >       static __always_inline bool is_kfence_address(const void *addr)
> >       {
> >               /*
> >                * The __kfence_pool != NULL check is required to deal with the case
> >                * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
> >                * the slow-path after the range-check!
> >                */
> >               return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
> >       }
>
> Do you mean it is performance critical because of accessing global data?
> It already accesses the __kfence_pool global.
> Is adding one more global acceptable here?
>
> Other places access global data as well, don't they?

is_kfence_address() is used in the slub fast path, and another load is
one more instruction in the fast path. We have avoided this thus far
for this reason.

> I don't know whether Linux distributions like Ubuntu enable KFENCE or not.
> I only know it is turned on by default on Android devices.

This is irrelevant.

> > While I think the change itself would be useful to have eventually, a
> > better design might be needed. It's unclear to me what the perf impact
>
> Could you share the better design idea?

Hot-patchable constants, similar to static branches/jump labels. This
had been discussed in the past (can't find the link now), but it's not
trivial to implement unfortunately.

> > is these days (a lot has changed since that comment was written). Could
> > you run some benchmarks to analyze if the fast path is affected by the
> > additional load (please do this for whichever arch you care about, but
> > also arm64 and x86)?
> >
> > If performance is affected, all this could be guarded behind another
> > Kconfig option, but it's not great either.
>
> What kind of option?
> There is already a Kconfig option to define the number of objects; this just
> provides a boot parameter for the same setting that the user can change.

An option that would enable/disable the command-line changeable number
of objects, i.e. one version that avoids the load in the fast path and
one version that enables all the bits that you added here. But I'd
rather avoid this if possible.

As such, please do benchmark and analyze the generated code in the
allocator fast path (you should see a load to the new global you
added). llvm-mca [1] might help you with analysis.

[1] https://llvm.org/docs/CommandGuide/llvm-mca.html
RE: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by yuanlinyu 1 month, 1 week ago
> From: Marco Elver <elver@google.com>
> Sent: Thursday, December 18, 2025 6:24 PM
> To: yuanlinyu <yuanlinyu@honor.com>
> Cc: Alexander Potapenko <glider@google.com>; Dmitry Vyukov
> <dvyukov@google.com>; Andrew Morton <akpm@linux-foundation.org>;
> Huacai Chen <chenhuacai@kernel.org>; WANG Xuerui <kernel@xen0n.name>;
> kasan-dev@googlegroups.com; linux-mm@kvack.org; loongarch@lists.linux.dev;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 2/2] kfence: allow change number of object by early
> parameter


> > Could you share the better design idea?
> 
> Hot-patchable constants, similar to static branches/jump labels. This
> had been discussed in the past (can't find the link now), but it's not
> trivial to implement unfortunately.
> 

Hi Marco,

If you are concerned about adding one more global, how about the code below?

/* The pool of pages used for guard pages and objects, with the number of objects in the lower bits. */
unsigned long __kfence_pool_objects __read_mostly;

static __always_inline bool is_kfence_address(const void *addr)
{
	return unlikely((unsigned long)((char *)addr - KFENCE_POOL_ADDR) < KFENCE_POOL_LEN && __kfence_pool_objects);
}

It may generate one or two more instructions compared with the original patch.
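
For reference, a hypothetical expansion of the macros used above (the names and
the exact bit layout are assumptions, not part of any posted patch): since the
pool base is PAGE_SIZE-aligned, its low bits are free to carry the object
count, which would limit it to PAGE_SIZE - 1 objects (e.g. 4095 with 4K pages).

/* Sketch only: pack the pool base and the object count into one word. */
#define KFENCE_POOL_ADDR	((char *)(__kfence_pool_objects & PAGE_MASK))
#define KFENCE_POOL_OBJECTS	(__kfence_pool_objects & ~PAGE_MASK)
#define KFENCE_POOL_LEN		((KFENCE_POOL_OBJECTS + 1) * 2 * PAGE_SIZE)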


RE: [PATCH v2 2/2] kfence: allow change number of object by early parameter
Posted by yuanlinyu 1 month, 3 weeks ago
> From: Marco Elver <elver@google.com>
> Sent: Thursday, December 18, 2025 6:24 PM
> To: yuanlinyu <yuanlinyu@honor.com>
> Cc: Alexander Potapenko <glider@google.com>; Dmitry Vyukov
> <dvyukov@google.com>; Andrew Morton <akpm@linux-foundation.org>;
> Huacai Chen <chenhuacai@kernel.org>; WANG Xuerui <kernel@xen0n.name>;
> kasan-dev@googlegroups.com; linux-mm@kvack.org; loongarch@lists.linux.dev;
> linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v2 2/2] kfence: allow change number of object by early
> parameter
> 
> On Thu, 18 Dec 2025 at 11:18, yuanlinyu <yuanlinyu@honor.com> wrote:
> >
> > > From: Marco Elver <elver@google.com>
> > Do you mean it is performance critical because of accessing global data?
> > It already accesses the __kfence_pool global.
> > Is adding one more global acceptable here?
> >
> > Other places access global data as well, don't they?
> 
> is_kfence_address() is used in the slub fast path, and another load is
> one more instruction in the fast path. We have avoided this thus far
> for this reason.
> 
> > I don't know whether Linux distributions like Ubuntu enable KFENCE or not.
> > I only know it is turned on by default on Android devices.
> 
> This is irrelevant.
> 
> > > While I think the change itself would be useful to have eventually, a
> > > better design might be needed. It's unclear to me what the perf impact
> >
> > Could you share the better design idea?
> 
> Hot-patchable constants, similar to static branches/jump labels. This
> had been discussed in the past (can't find the link now), but it's not
> trivial to implement unfortunately.

Is it possible to add a tag to KFENCE addresses and only check the address itself?

> 
> An option that would enable/disable the command-line changeable number
> of objects, i.e. one version that avoids the load in the fast path and
> one version that enables all the bits that you added here. But I'd
> rather avoid this if possible.

Yes, that should be avoided; the purpose is to make this possible without recompiling the kernel.

> 
> As such, please do benchmark and analyze the generated code in the
> allocator fast path (you should see a load to the new global you
> added). llvm-mca [1] might help you with analysis.
> 
> [1] https://llvm.org/docs/CommandGuide/llvm-mca.html

Thanks, I will look into it.