Randomize the KFENCE freelist during pool initialization to make allocation
patterns less predictable. This is achieved by shuffling the order in which
metadata objects are added to the freelist using get_random_u32_below().

Additionally, ensure the error path correctly calculates the address range
to be reset if initialization fails, as the address increment logic has
been moved to a separate loop.
Cc: stable@vger.kernel.org
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Pimyn Girgis <pimyn@google.com>
---
mm/kfence/core.c | 23 +++++++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
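
For readers who want to check the permutation logic, here is a minimal
userspace sketch of the three-pass shuffle. It is illustrative only:
rand_below() and NUM_OBJECTS are stand-ins for the kernel's
get_random_u32_below() and CONFIG_KFENCE_NUM_OBJECTS, and the biased
modulo draw is good enough for a demo but not for the kernel.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUM_OBJECTS 8

/* Stand-in for get_random_u32_below(): returns a value in [0, ceil). */
static unsigned int rand_below(unsigned int ceil)
{
	return (unsigned int)(rand() % ceil);
}

int main(void)
{
	unsigned long addr[NUM_OBJECTS];
	int i;

	srand((unsigned int)time(NULL));

	/* Pass 1: store each object's index in its addr slot. */
	for (i = 0; i < NUM_OBJECTS; i++)
		addr[i] = i;

	/* Pass 2: Fisher-Yates shuffle, mirroring the patch. */
	for (i = NUM_OBJECTS; i > 0; i--) {
		unsigned int r = rand_below(i);
		unsigned long tmp = addr[i - 1];

		addr[i - 1] = addr[r];
		addr[r] = tmp;
	}

	/* Pass 3: the shuffled indices give the freelist insertion order. */
	for (i = 0; i < NUM_OBJECTS; i++)
		printf("freelist position %d -> object %lu\n", i, addr[i]);

	return 0;
}

Reusing the ->addr field as scratch space for the permutation is what lets
the patch avoid allocating a separate index array; the field only receives
its real page address in the final loop.
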
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 577a1699c553..9e8b3cfd3f76 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -596,7 +596,7 @@ static void rcu_guarded_free(struct rcu_head *h)
 static unsigned long kfence_init_pool(void)
 {
 	unsigned long addr, start_pfn;
-	int i;
+	int i, rand;
 
 	if (!arch_kfence_init_pool())
 		return (unsigned long)__kfence_pool;
@@ -647,13 +647,27 @@ static unsigned long kfence_init_pool(void)
 		INIT_LIST_HEAD(&meta->list);
 		raw_spin_lock_init(&meta->lock);
 		meta->state = KFENCE_OBJECT_UNUSED;
-		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
-		list_add_tail(&meta->list, &kfence_freelist);
+		/* Temporarily store the index in addr to randomize the freelist. */
+		meta->addr = i;
 
 		/* Protect the right redzone. */
-		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+		if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
 			goto reset_slab;
+	}
+
+	for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
+		rand = get_random_u32_below(i);
+		swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
+	}
 
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+		struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
+		struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
+
+		list_add_tail(&meta_2->list, &kfence_freelist);
+	}
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+		kfence_metadata_init[i].addr = addr;
 		addr += 2 * PAGE_SIZE;
 	}
 
@@ -666,6 +680,7 @@ static unsigned long kfence_init_pool(void)
 	return 0;
 
 reset_slab:
+	addr += 2 * i * PAGE_SIZE;
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
 		struct page *page;
 
--
2.52.0.457.g6b5491de43-goog
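
To see why the error path needs the adjustment: addr is no longer
incremented inside the protection loop, so on failure it still points just
past the first two (guard) pages of the pool. Below is a small sketch of
the arithmetic, with pool_base as a hypothetical stand-in for
(unsigned long)__kfence_pool and a made-up failing index:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pool_base = 0x1000000UL;		/* hypothetical pool start */
	unsigned long addr = pool_base + 2 * PAGE_SIZE;	/* past the two leading guard pages */
	int i = 3;					/* suppose kfence_protect() failed for object 3 */

	/*
	 * Each object occupies a (page, guard page) pair, so object i
	 * starts 2 * i pages beyond the first object.  Before the patch,
	 * the per-iteration "addr += 2 * PAGE_SIZE" had already advanced
	 * addr to this point by the time of the failure; now reset_slab
	 * reconstructs the same value explicitly.
	 */
	addr += 2 * i * PAGE_SIZE;

	printf("failed at object %d, addr = %#lx\n", i, addr);
	return 0;
}
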
On Tue, Jan 20, 2026 at 5:16 PM Pimyn Girgis <pimyn@google.com> wrote:
>
> Randomize the KFENCE freelist during pool initialization to make allocation
> patterns less predictable. This is achieved by shuffling the order in which
> metadata objects are added to the freelist using get_random_u32_below().
>
> Additionally, ensure the error path correctly calculates the address range
> to be reset if initialization fails, as the address increment logic has
> been moved to a separate loop.
>
> Cc: stable@vger.kernel.org
> Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
> Signed-off-by: Pimyn Girgis <pimyn@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
On Tue, 20 Jan 2026 17:15:10 +0100 Pimyn Girgis <pimyn@google.com> wrote:
> Randomize the KFENCE freelist during pool initialization to make allocation
> patterns less predictable. This is achieved by shuffling the order in which
> metadata objects are added to the freelist using get_random_u32_below().
>
> Additionally, ensure the error path correctly calculates the address range
> to be reset if initialization fails, as the address increment logic has
> been moved to a separate loop.
>
> Cc: stable@vger.kernel.org
> Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
It isn't clear (to me) what was wrong with 0ce20dd84089, nor why a
-stable backport is proposed.
Can we please have a full description of the current misbehavior? What
are the worst-case userspace-visible effects of this flaw?