From: Peter Zijlstra <peterz@infradead.org>
Runtime constify the read-only after init data __futex_shift(shift_32),
__futex_mask(mask_32), and __futex_queues(ptr) used in __futex_hash()
hot path to avoid referencing global variables.
This also allows __futex_queues to be allocated dynamically to
"nr_node_ids" slots instead of reserving config dependent MAX_NUMNODES
(1 << CONFIG_NODES_SHIFT) worth of slots upfront.
No functional changes intended.
[ prateek: Dynamically allocate __futex_queues, mark the global data
__ro_after_init since they are constified after futex_init(). ]
Link: https://patch.msgid.link/20260227161841.GH606826@noisy.programming.kicks-ass.net
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> # MAX_NUMNODES bloat
Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
include/asm-generic/vmlinux.lds.h | 5 +++-
kernel/futex/core.c | 42 +++++++++++++++++--------------
2 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 1e1580febe4b..86f99fa6ae24 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -975,7 +975,10 @@
RUNTIME_CONST(shift, d_hash_shift) \
RUNTIME_CONST(ptr, dentry_hashtable) \
RUNTIME_CONST(ptr, __dentry_cache) \
- RUNTIME_CONST(ptr, __names_cache)
+ RUNTIME_CONST(ptr, __names_cache) \
+ RUNTIME_CONST(shift, __futex_shift) \
+ RUNTIME_CONST(mask, __futex_mask) \
+ RUNTIME_CONST(ptr, __futex_queues)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index cf7e610eac42..6b5c5a1596a5 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -45,23 +45,19 @@
#include <linux/mempolicy.h>
#include <linux/mmap_lock.h>
+#include <asm/runtime-const.h>
+
#include "futex.h"
#include "../locking/rtmutex_common.h"
-/*
- * The base of the bucket array and its size are always used together
- * (after initialization only in futex_hash()), so ensure that they
- * reside in the same cacheline.
- */
-static struct {
- unsigned long hashmask;
- unsigned int hashshift;
- struct futex_hash_bucket *queues[MAX_NUMNODES];
-} __futex_data __read_mostly __aligned(2*sizeof(long));
+static u32 __futex_mask __ro_after_init;
+static u32 __futex_shift __ro_after_init;
+static struct futex_hash_bucket **__futex_queues __ro_after_init;
-#define futex_hashmask (__futex_data.hashmask)
-#define futex_hashshift (__futex_data.hashshift)
-#define futex_queues (__futex_data.queues)
+static __always_inline struct futex_hash_bucket **futex_queues(void)
+{
+ return runtime_const_ptr(__futex_queues);
+}
struct futex_private_hash {
int state;
@@ -439,14 +435,14 @@ __futex_hash(union futex_key *key, struct futex_private_hash *fph)
* NOTE: this isn't perfectly uniform, but it is fast and
* handles sparse node masks.
*/
- node = (hash >> futex_hashshift) % nr_node_ids;
+ node = runtime_const_shift_right_32(hash, __futex_shift) % nr_node_ids;
if (!node_possible(node)) {
node = find_next_bit_wrap(node_possible_map.bits,
nr_node_ids, node);
}
}
- return &futex_queues[node][hash & futex_hashmask];
+ return &futex_queues()[node][runtime_const_mask_32(hash, __futex_mask)];
}
/**
@@ -1913,7 +1909,7 @@ int futex_hash_allocate_default(void)
* 16 <= threads * 4 <= global hash size
*/
buckets = roundup_pow_of_two(4 * threads);
- buckets = clamp(buckets, 16, futex_hashmask + 1);
+ buckets = clamp(buckets, 16, __futex_mask + 1);
if (current_buckets >= buckets)
return 0;
@@ -1983,10 +1979,19 @@ static int __init futex_init(void)
hashsize = max(4, hashsize);
hashsize = roundup_pow_of_two(hashsize);
#endif
- futex_hashshift = ilog2(hashsize);
+ __futex_mask = hashsize - 1;
+ __futex_shift = ilog2(hashsize);
size = sizeof(struct futex_hash_bucket) * hashsize;
order = get_order(size);
+ __futex_queues = kcalloc(nr_node_ids, sizeof(*__futex_queues), GFP_KERNEL);
+
+ runtime_const_init(shift, __futex_shift);
+ runtime_const_init(mask, __futex_mask);
+ runtime_const_init(ptr, __futex_queues);
+
+ BUG_ON(!futex_queues());
+
for_each_node(n) {
struct futex_hash_bucket *table;
@@ -2000,10 +2005,9 @@ static int __init futex_init(void)
for (i = 0; i < hashsize; i++)
futex_hash_bucket_init(&table[i], NULL);
- futex_queues[n] = table;
+ futex_queues()[n] = table;
}
- futex_hashmask = hashsize - 1;
pr_info("futex hash table entries: %lu (%lu bytes on %d NUMA nodes, total %lu KiB, %s).\n",
hashsize, size, num_possible_nodes(), size * num_possible_nodes() / 1024,
order > MAX_PAGE_ORDER ? "vmalloc" : "linear");
--
2.43.0
Hi Prateek,
On 2026-03-16 12:24 AM, K Prateek Nayak wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> Runtime constify the read-only after init data __futex_shift(shift_32),
> __futex_mask(mask_32), and __futex_queues(ptr) used in __futex_hash()
> hot path to avoid referencing global variable.
>
> This also allows __futex_queues to be allocated dynamically to
> "nr_node_ids" slots instead of reserving config dependent MAX_NUMNODES
> (1 << CONFIG_NODES_SHIFT) worth of slots upfront.
>
> No functional changes intended.
>
> [ prateek: Dynamically allocate __futex_queues, mark the global data
> __ro_after_init since they are constified after futex_init(). ]
>
> Link: https://patch.msgid.link/20260227161841.GH606826@noisy.programming.kicks-ass.net
> Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> # MAX_NUMNODES bloat
> Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
> ---
> include/asm-generic/vmlinux.lds.h | 5 +++-
> kernel/futex/core.c | 42 +++++++++++++++++--------------
> 2 files changed, 27 insertions(+), 20 deletions(-)
>
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index 1e1580febe4b..86f99fa6ae24 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -975,7 +975,10 @@
> RUNTIME_CONST(shift, d_hash_shift) \
> RUNTIME_CONST(ptr, dentry_hashtable) \
> RUNTIME_CONST(ptr, __dentry_cache) \
> - RUNTIME_CONST(ptr, __names_cache)
> + RUNTIME_CONST(ptr, __names_cache) \
> + RUNTIME_CONST(shift, __futex_shift) \
> + RUNTIME_CONST(mask, __futex_mask) \
> + RUNTIME_CONST(ptr, __futex_queues)
>
> /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
> #define KUNIT_TABLE() \
> diff --git a/kernel/futex/core.c b/kernel/futex/core.c
> index cf7e610eac42..6b5c5a1596a5 100644
> --- a/kernel/futex/core.c
> +++ b/kernel/futex/core.c
> @@ -45,23 +45,19 @@
> #include <linux/mempolicy.h>
> #include <linux/mmap_lock.h>
>
> +#include <asm/runtime-const.h>
> +
> #include "futex.h"
> #include "../locking/rtmutex_common.h"
>
> -/*
> - * The base of the bucket array and its size are always used together
> - * (after initialization only in futex_hash()), so ensure that they
> - * reside in the same cacheline.
> - */
> -static struct {
> - unsigned long hashmask;
> - unsigned int hashshift;
> - struct futex_hash_bucket *queues[MAX_NUMNODES];
> -} __futex_data __read_mostly __aligned(2*sizeof(long));
> +static u32 __futex_mask __ro_after_init;
> +static u32 __futex_shift __ro_after_init;
> +static struct futex_hash_bucket **__futex_queues __ro_after_init;
>
> -#define futex_hashmask (__futex_data.hashmask)
> -#define futex_hashshift (__futex_data.hashshift)
> -#define futex_queues (__futex_data.queues)
> +static __always_inline struct futex_hash_bucket **futex_queues(void)
> +{
> + return runtime_const_ptr(__futex_queues);
> +}
>
> struct futex_private_hash {
> int state;
> @@ -439,14 +435,14 @@ __futex_hash(union futex_key *key, struct futex_private_hash *fph)
> * NOTE: this isn't perfectly uniform, but it is fast and
> * handles sparse node masks.
> */
> - node = (hash >> futex_hashshift) % nr_node_ids;
> + node = runtime_const_shift_right_32(hash, __futex_shift) % nr_node_ids;
> if (!node_possible(node)) {
> node = find_next_bit_wrap(node_possible_map.bits,
> nr_node_ids, node);
> }
> }
>
> - return &futex_queues[node][hash & futex_hashmask];
> + return &futex_queues()[node][runtime_const_mask_32(hash, __futex_mask)];
> }
>
> /**
> @@ -1913,7 +1909,7 @@ int futex_hash_allocate_default(void)
> * 16 <= threads * 4 <= global hash size
> */
> buckets = roundup_pow_of_two(4 * threads);
> - buckets = clamp(buckets, 16, futex_hashmask + 1);
> + buckets = clamp(buckets, 16, __futex_mask + 1);
>
> if (current_buckets >= buckets)
> return 0;
> @@ -1983,10 +1979,19 @@ static int __init futex_init(void)
> hashsize = max(4, hashsize);
> hashsize = roundup_pow_of_two(hashsize);
> #endif
> - futex_hashshift = ilog2(hashsize);
> + __futex_mask = hashsize - 1;
> + __futex_shift = ilog2(hashsize);
__futex_mask is always a power of two minus 1, in other words all low bits set.
Would it be worth using an n-bit zero extension operation instead of an
arbitrary 32-bit mask? This would use fewer instructions on some architectures:
for example a single ubfx on arm64 and slli+srli on riscv.
Regards,
Samuel
Hello Samuel,
On 3/17/2026 8:36 AM, Samuel Holland wrote:
>> @@ -1913,7 +1909,7 @@ int futex_hash_allocate_default(void)
>> * 16 <= threads * 4 <= global hash size
>> */
>> buckets = roundup_pow_of_two(4 * threads);
>> - buckets = clamp(buckets, 16, futex_hashmask + 1);
>> + buckets = clamp(buckets, 16, __futex_mask + 1);
>>
>> if (current_buckets >= buckets)
>> return 0;
>> @@ -1983,10 +1979,19 @@ static int __init futex_init(void)
>> hashsize = max(4, hashsize);
>> hashsize = roundup_pow_of_two(hashsize);
>> #endif
>> - futex_hashshift = ilog2(hashsize);
>> + __futex_mask = hashsize - 1;
>> + __futex_shift = ilog2(hashsize);
>
> __futex_mask is always a power of two minus 1, in other words all low bits set.
> Would it be worth using an n-bit zero extension operation instead of an
> arbitrary 32-bit mask? This would use fewer instructions on some architectures:
> for example a single ubfx on arm64 and slli+srli on riscv.
Sure that works for __futex_mask but runtime_const_mask_32() should be
generic enough to handle any mask, no?
Currently, the __futex_hash() with futex_hashmask ends up being:
# ./include/linux/jhash.h:139: __jhash_final(a, b, c);
xor a4,a4,a3 # tmp350, tmp353, tmp334
...
# kernel/futex/core.c:449: return &futex_queues[node][hash & futex_hashmask];
lla a3,.LANCHOR0 # tmp361,
# kernel/futex/core.c:449: return &futex_queues[node][hash & futex_hashmask];
ld a5,0(a3) # __futex_data.hashmask, __futex_data.hashmask
...
# kernel/futex/core.c:449: return &futex_queues[node][hash & futex_hashmask];
and a5,a5,a4 # tmp358, tmp367, __futex_data.hashmask
which isn't too far from what runtime_const_mask_32() implements
where "lla + ld" sequence gets replaced by the "lui + addi"
sequence to load the immediate.
Sure it can be better here since we know the bitmask is of the form
GENMASK(N,0) but IMO runtime_const_mask_32() should generally work
for all masks.
Now, runtime_const_mask_lower_32(val, nbits) may be a better suited
API name for that purpose.
If there is enough interest, I'll go back to the drawing board and
go that route for v2 for arm64 and riscv.
--
Thanks and Regards,
Prateek
On 2026-03-16 05:24:01 [+0000], K Prateek Nayak wrote: > From: Peter Zijlstra <peterz@infradead.org> > > Runtime constify the read-only after init data __futex_shift(shift_32), > __futex_mask(mask_32), and __futex_queues(ptr) used in __futex_hash() > hot path to avoid referencing global variable. > > This also allows __futex_queues to be allocated dynamically to > "nr_node_ids" slots instead of reserving config dependent MAX_NUMNODES > (1 << CONFIG_NODES_SHIFT) worth of slots upfront. > > No functional chages intended. > > [ prateek: Dynamically allocate __futex_queues, mark the global data > __ro_after_init since they are constified after futex_init(). ] > > Link: https://patch.msgid.link/20260227161841.GH606826@noisy.programming.kicks-ass.net > Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> # MAX_NUMNODES bloat > Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org> > Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com> This all looks nice. Let me look later at the resulting code. Thank you so far ;) Sebastian
Hello Sebastian, On 3/16/2026 1:44 PM, Sebastian Andrzej Siewior wrote: > This all looks nice. Let me look later at the resulting code. Thank you > so far ;) Let me know if you find anything nasty and we can see how to best address those bits in the next version :-) Thank you for taking a look at the series. -- Thanks and Regards, Prateek
© 2016 - 2026 Red Hat, Inc.