From: Matt Fleming <mfleming@cloudflare.com>
We're hitting the WARN in depot_init_pool() about reaching the stack
depot limit because we have long stacks that don't dedup very well.
Introduce a new start-up parameter to allow users to set the maximum
number of stack depot pools.
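
For example (the value below is purely illustrative), a machine that
keeps exhausting the depot could raise the cap at boot with:

    stack_depot_max_pools=16384

provided the value is at least 1024 and does not exceed the limit
derived from DEPOT_POOL_INDEX_BITS; out-of-range values fall back to
the default.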
Signed-off-by: Matt Fleming <mfleming@cloudflare.com>
---
Changes in v4:
- Simplify initial stack_max_pools value courtesy of akpm

Changes in v3:
- Switch from build-time to boot-time parameter

Changes in v2:
- Replace BUILD_BUG_ON with static_assert()
- Hide STACKDEPOT_MAX_POOLS behind EXPERT
.../admin-guide/kernel-parameters.txt | 5 ++
lib/stackdepot.c | 67 ++++++++++++++++---
2 files changed, 63 insertions(+), 9 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 07e22ba5bfe3..273f6bbd56c2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6988,6 +6988,11 @@
consumed by the stack hash table. By default this is set
to false.
+ stack_depot_max_pools= [KNL,EARLY]
+ Specify the maximum number of pools to use for storing
+ stack traces. Pools are allocated on-demand up to this
+ limit. Default value is 8192 pools.
+
stacktrace [FTRACE]
Enabled the stack tracer on boot up.
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 73d7b50924ef..de0b0025af2b 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -36,11 +36,11 @@
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
-#define DEPOT_POOLS_CAP 8192
-/* The pool_index is offset by 1 so the first record does not have a 0 handle. */
-#define DEPOT_MAX_POOLS \
- (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \
- (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP)
+/*
+ * The pool_index is offset by 1 so the first record does not have a 0 handle.
+ */
+static unsigned int stack_max_pools __read_mostly =
+ MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192);
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
@@ -62,7 +62,7 @@ static unsigned int stack_bucket_number_order;
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack records. */
-static void *stack_pools[DEPOT_MAX_POOLS];
+static void **stack_pools;
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
@@ -101,6 +101,34 @@ static int __init disable_stack_depot(char *str)
}
early_param("stack_depot_disable", disable_stack_depot);
+static int __init parse_max_pools(char *str)
+{
+ const long long limit = (1LL << (DEPOT_POOL_INDEX_BITS)) - 1;
+ unsigned int max_pools;
+ int rv;
+
+ rv = kstrtouint(str, 0, &max_pools);
+ if (rv)
+ return rv;
+
+ if (max_pools < 1024) {
+ pr_err("stack_depot_max_pools below 1024, using default of %u\n",
+ stack_max_pools);
+ goto out;
+ }
+
+ if (max_pools > limit) {
+ pr_err("stack_depot_max_pools exceeds %lld, using default of %u\n",
+ limit, stack_max_pools);
+ goto out;
+ }
+
+ stack_max_pools = max_pools;
+out:
+ return 0;
+}
+early_param("stack_depot_max_pools", parse_max_pools);
+
void __init stack_depot_request_early_init(void)
{
/* Too late to request early init now. */
@@ -182,6 +210,17 @@ int __init stack_depot_early_init(void)
}
init_stack_table(entries);
+ pr_info("allocating space for %u stack pools via memblock\n",
+ stack_max_pools);
+ stack_pools =
+ memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE);
+ if (!stack_pools) {
+ pr_err("stack pools allocation failed, disabling\n");
+ memblock_free(stack_table, entries * sizeof(struct list_head));
+ stack_depot_disabled = true;
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -231,6 +270,16 @@ int stack_depot_init(void)
stack_hash_mask = entries - 1;
init_stack_table(entries);
+ pr_info("allocating space for %u stack pools via kvcalloc\n",
+ stack_max_pools);
+ stack_pools = kvcalloc(stack_max_pools, sizeof(void *), GFP_KERNEL);
+ if (!stack_pools) {
+ pr_err("stack pools allocation failed, disabling\n");
+ kvfree(stack_table);
+ stack_depot_disabled = true;
+ ret = -ENOMEM;
+ }
+
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
@@ -245,9 +294,9 @@ static bool depot_init_pool(void **prealloc)
{
lockdep_assert_held(&pool_lock);
- if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ if (unlikely(pools_num >= stack_max_pools)) {
/* Bail out if we reached the pool limit. */
- WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */
+ WARN_ON_ONCE(pools_num > stack_max_pools); /* should never happen */
WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */
WARN_ONCE(1, "Stack depot reached limit capacity");
return false;
@@ -273,7 +322,7 @@ static bool depot_init_pool(void **prealloc)
* NULL; do not reset to NULL if we have reached the maximum number of
* pools.
*/
- if (pools_num < DEPOT_MAX_POOLS)
+ if (pools_num < stack_max_pools)
WRITE_ONCE(new_pool, NULL);
else
WRITE_ONCE(new_pool, STACK_DEPOT_POISON);
--
2.34.1
On 7/18/25 17:39, Matt Fleming wrote:
> From: Matt Fleming <mfleming@cloudflare.com>
>
> We're hitting the WARN in depot_init_pool() about reaching the stack
> depot limit because we have long stacks that don't dedup very well.
>
> Introduce a new start-up parameter to allow users to set the maximum
> number of stack depot pools.
>
> Signed-off-by: Matt Fleming <mfleming@cloudflare.com>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

[...]

>  /* Array of memory regions that store stack records. */
> -static void *stack_pools[DEPOT_MAX_POOLS];
> +static void **stack_pools;

Also nice that (AFAIU) we now save this previously statically allocated
array's memory if stackdepot is built-in but not used by anything.

[...]

> +	if (max_pools < 1024) {
> +		pr_err("stack_depot_max_pools below 1024, using default of %u\n",
> +		       stack_max_pools);
> +		goto out;
> +	}
> +
> +	if (max_pools > limit) {
> +		pr_err("stack_depot_max_pools exceeds %lld, using default of %u\n",
> +		       limit, stack_max_pools);
> +		goto out;
> +	}

Maybe we could bound the requested value to between 1024 and limit
instead of falling back to the default and completely ignoring it when
it's out of the range?

> +	stack_max_pools = max_pools;
> +out:
> +	return 0;
> +}
> +early_param("stack_depot_max_pools", parse_max_pools);
> +
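A minimal sketch of the clamping variant suggested above (illustrative
only, not the posted patch; it assumes clamp() from <linux/minmax.h>
and reuses the names introduced by the patch):

static int __init parse_max_pools(char *str)
{
	/* Upper bound imposed by the handle encoding, as in the posted patch. */
	const unsigned int limit = (1LL << DEPOT_POOL_INDEX_BITS) - 1;
	unsigned int max_pools;
	int rv;

	rv = kstrtouint(str, 0, &max_pools);
	if (rv)
		return rv;

	/* Clamp out-of-range requests instead of ignoring them. */
	stack_max_pools = clamp(max_pools, 1024u, limit);
	if (stack_max_pools != max_pools)
		pr_warn("stack_depot_max_pools out of range, clamped to %u\n",
			stack_max_pools);

	return 0;
}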
On Fri, 18 Jul 2025 at 17:39, Matt Fleming <matt@readmodwrite.com> wrote:
>
> From: Matt Fleming <mfleming@cloudflare.com>
>
> We're hitting the WARN in depot_init_pool() about reaching the stack
> depot limit because we have long stacks that don't dedup very well.
>
> Introduce a new start-up parameter to allow users to set the maximum
> number of stack depot pools.
>
> Signed-off-by: Matt Fleming <mfleming@cloudflare.com>

Acked-by: Marco Elver <elver@google.com>

Minor improvement suggestions below (at your discretion). Thanks!

[...]

> +/*
> + * The pool_index is offset by 1 so the first record does not have a 0 handle.
> + */
> +static unsigned int stack_max_pools __read_mostly =

This could be __ro_after_init.

> +	MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192);

[...]

> +	if (max_pools < 1024) {
> +		pr_err("stack_depot_max_pools below 1024, using default of %u\n",
> +		       stack_max_pools);

nit: I feel the choice of 1024 should be somewhat more documented,
because right now it looks like a magic value.

[...]

> +	pr_info("allocating space for %u stack pools via memblock\n",
> +		stack_max_pools);
> +	stack_pools =
> +		memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE);

Bike-shedding territory: Kernel allows up to 120 cols if it helps
readability. Line-break-assignments just look wrong and clang-format
should be fixed. Regardless, I know some folks just want to follow what
clang-format suggests without much consideration for readability.

[...]
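For completeness, the __ro_after_init change suggested above is just an
attribute swap on the variable (a sketch, not the posted patch):

static unsigned int stack_max_pools __ro_after_init =
	MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192);

stack_max_pools is only written from the early_param handler during
boot, so it can safely be made read-only once init completes.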