From: Peter Zijlstra <peterz@infradead.org>
The use of rcuref_t for reference counting introduces a performance bottleneck
when accessed concurrently by multiple threads during futex operations.
Replace rcuref_t with specially crafted per-CPU reference counters. The
lifetime logic remains the same.
The newly allocated private hash starts in the FR_PERCPU state. In this state,
each futex operation that requires the private hash uses a per-CPU counter (an
unsigned int) for incrementing or decrementing the reference count.
When the private hash is about to be replaced, the per-CPU counters are
migrated to an atomic_long_t counter, mm_struct::futex_atomic.
The migration process:
- Wait for one RCU grace period to ensure all users observe the
  current private hash. This can be skipped if a grace period has elapsed
  since the private hash was assigned.
- futex_private_hash::state is set to FR_ATOMIC, forcing all users to
use mm_struct::futex_atomic for reference counting.
- After an RCU grace period, all users are guaranteed to be using the
  atomic counter. The per-CPU counters can now be summed up and added to
  the atomic_long_t counter. If the resulting count is zero, the hash can be
  safely replaced. Otherwise, active users still hold a valid reference.
  (A worked sketch of this fold-over arithmetic follows the list.)
- Once the atomic reference count drops to zero, the next futex
operation will switch to the new private hash.
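The fold-over arithmetic of the last two steps can be illustrated with a
single-threaded userspace mock. Illustration only, not part of the patch:
futex_ref_mock, NR_CPUS and the whole flow below are invented for the
example, "CPUs" are plain array slots, and all RCU and memory-ordering
concerns are left out.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

enum { FR_PERCPU, FR_ATOMIC };

struct futex_ref_mock {
	int state;
	unsigned int percpu[NR_CPUS];	/* stands in for mm->futex_ref */
	atomic_long refs;		/* stands in for mm->futex_atomic */
};

static void ref_get(struct futex_ref_mock *r, int cpu)
{
	if (r->state == FR_PERCPU)
		r->percpu[cpu]++;
	else
		atomic_fetch_add(&r->refs, 1);
}

/* Returns true when the last reference is dropped (atomic mode only). */
static bool ref_put(struct futex_ref_mock *r, int cpu)
{
	if (r->state == FR_PERCPU) {
		r->percpu[cpu]--;
		return false;
	}
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

/* Install the bias so the atomic never reads zero, then switch the mode. */
static void mock_atomic_begin(struct futex_ref_mock *r)
{
	atomic_store(&r->refs, LONG_MAX);
	r->state = FR_ATOMIC;
}

/* Fold the per-CPU counts in; drop the bias and the initial reference. */
static long mock_atomic_end(struct futex_ref_mock *r)
{
	long count = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		count += r->percpu[cpu];
		r->percpu[cpu] = 0;
	}
	atomic_fetch_add(&r->refs, count - LONG_MAX - 1);
	return atomic_load(&r->refs);
}

int main(void)
{
	static struct futex_ref_mock r = { .state = FR_PERCPU };

	r.percpu[0] = 1;		/* initial reference */
	ref_get(&r, 1);			/* active user on "CPU" 1 */

	mock_atomic_begin(&r);
	ref_get(&r, 2);			/* user arriving after the switch */

	printf("after fold: %ld\n", mock_atomic_end(&r));	/* 2: users remain */
	printf("put: %d\n", ref_put(&r, 2));			/* 0 */
	printf("put: %d\n", ref_put(&r, 1));			/* 1: can replace hash */
	return 0;
}

Folding in count - LONG_MAX - 1 removes both the bias and the initial
reference in one step, which is why a result of zero means the hash can be
replaced.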
call_rcu_hurry() is used to speed up the transition, which otherwise might be
delayed with RCU_LAZY. There is nothing wrong with using call_rcu(); the
side effects would be that with auto scaling the new hash is used later
and the SET_SLOTS prctl() would block longer.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/futex.h | 14 +--
include/linux/mm_types.h | 5 +
init/Kconfig | 4 -
kernel/fork.c | 6 +-
kernel/futex/core.c | 237 ++++++++++++++++++++++++++++++++++++---
5 files changed, 234 insertions(+), 32 deletions(-)
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b37193653e6b5..cd773febd497b 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -85,18 +85,12 @@ int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
#ifdef CONFIG_FUTEX_PRIVATE_HASH
int futex_hash_allocate_default(void);
void futex_hash_free(struct mm_struct *mm);
-
-static inline void futex_mm_init(struct mm_struct *mm)
-{
- RCU_INIT_POINTER(mm->futex_phash, NULL);
- mm->futex_phash_new = NULL;
- mutex_init(&mm->futex_hash_lock);
-}
+int futex_mm_init(struct mm_struct *mm);
#else /* !CONFIG_FUTEX_PRIVATE_HASH */
static inline int futex_hash_allocate_default(void) { return 0; }
-static inline void futex_hash_free(struct mm_struct *mm) { }
-static inline void futex_mm_init(struct mm_struct *mm) { }
+static inline int futex_hash_free(struct mm_struct *mm) { return 0; }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
#endif /* CONFIG_FUTEX_PRIVATE_HASH */
#else /* !CONFIG_FUTEX */
@@ -118,7 +112,7 @@ static inline int futex_hash_allocate_default(void)
{
return 0;
}
-static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline int futex_hash_free(struct mm_struct *mm) { return 0; }
static inline void futex_mm_init(struct mm_struct *mm) { }
#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d6b91e8a66d6d..0f0662157066a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1070,6 +1070,11 @@ struct mm_struct {
struct mutex futex_hash_lock;
struct futex_private_hash __rcu *futex_phash;
struct futex_private_hash *futex_phash_new;
+ /* futex-ref */
+ unsigned long futex_batches;
+ struct rcu_head futex_rcu;
+ atomic_long_t futex_atomic;
+ unsigned int __percpu *futex_ref;
#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
diff --git a/init/Kconfig b/init/Kconfig
index 666783eb50abd..af4c2f0854554 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1716,13 +1716,9 @@ config FUTEX_PI
depends on FUTEX && RT_MUTEXES
default y
-#
-# marked broken for performance reasons; gives us one more cycle to sort things out.
-#
config FUTEX_PRIVATE_HASH
bool
depends on FUTEX && !BASE_SMALL && MMU
- depends on BROKEN
default y
config FUTEX_MPOL
diff --git a/kernel/fork.c b/kernel/fork.c
index 1ee8eb11f38ba..66c4d4cc2340b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1046,7 +1046,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
RCU_INIT_POINTER(mm->exe_file, NULL);
mmu_notifier_subscriptions_init(mm);
init_tlb_flush_pending(mm);
- futex_mm_init(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
mm->pmd_huge_pte = NULL;
#endif
@@ -1061,6 +1060,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->def_flags = 0;
}
+ if (futex_mm_init(mm))
+ goto fail_mm_init;
+
if (mm_alloc_pgd(mm))
goto fail_nopgd;
@@ -1090,6 +1092,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
fail_noid:
mm_free_pgd(mm);
fail_nopgd:
+ futex_hash_free(mm);
+fail_mm_init:
free_mm(mm);
return NULL;
}
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 90d53fb0ee9e1..b578a536a4fe2 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -42,7 +42,6 @@
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/prctl.h>
-#include <linux/rcuref.h>
#include <linux/mempolicy.h>
#include <linux/mmap_lock.h>
@@ -65,7 +64,7 @@ static struct {
#define futex_queues (__futex_data.queues)
struct futex_private_hash {
- rcuref_t users;
+ int state;
unsigned int hash_mask;
struct rcu_head rcu;
void *mm;
@@ -129,6 +128,12 @@ static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph);
#ifdef CONFIG_FUTEX_PRIVATE_HASH
+static bool futex_ref_get(struct futex_private_hash *fph);
+static bool futex_ref_put(struct futex_private_hash *fph);
+static bool futex_ref_is_dead(struct futex_private_hash *fph);
+
+enum { FR_PERCPU = 0, FR_ATOMIC };
+
static inline bool futex_key_is_private(union futex_key *key)
{
/*
@@ -142,15 +147,14 @@ bool futex_private_hash_get(struct futex_private_hash *fph)
{
if (fph->immutable)
return true;
- return rcuref_get(&fph->users);
+ return futex_ref_get(fph);
}
void futex_private_hash_put(struct futex_private_hash *fph)
{
- /* Ignore return value, last put is verified via rcuref_is_dead() */
if (fph->immutable)
return;
- if (rcuref_put(&fph->users))
+ if (futex_ref_put(fph))
wake_up_var(fph->mm);
}
@@ -243,14 +247,18 @@ static bool __futex_pivot_hash(struct mm_struct *mm,
fph = rcu_dereference_protected(mm->futex_phash,
lockdep_is_held(&mm->futex_hash_lock));
if (fph) {
- if (!rcuref_is_dead(&fph->users)) {
+ if (!futex_ref_is_dead(fph)) {
mm->futex_phash_new = new;
return false;
}
futex_rehash_private(fph, new);
}
- rcu_assign_pointer(mm->futex_phash, new);
+ new->state = FR_PERCPU;
+ scoped_guard(rcu) {
+ mm->futex_batches = get_state_synchronize_rcu();
+ rcu_assign_pointer(mm->futex_phash, new);
+ }
kvfree_rcu(fph, rcu);
return true;
}
@@ -289,9 +297,7 @@ struct futex_private_hash *futex_private_hash(void)
if (!fph)
return NULL;
- if (fph->immutable)
- return fph;
- if (rcuref_get(&fph->users))
+ if (futex_private_hash_get(fph))
return fph;
}
futex_pivot_hash(mm);
@@ -1527,16 +1533,213 @@ static void futex_hash_bucket_init(struct futex_hash_bucket *fhb,
#define FH_IMMUTABLE 0x02
#ifdef CONFIG_FUTEX_PRIVATE_HASH
+
+/*
+ * futex-ref
+ *
+ * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
+ * code because it just doesn't fit right.
+ *
+ * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
+ * re-initializes the state automatically, such that the fph swizzle is also a
+ * transition back to per-cpu.
+ */
+
+static void futex_ref_rcu(struct rcu_head *head);
+
+static void __futex_ref_atomic_begin(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+
+ /*
+ * The counter we're about to switch to must have fully switched;
+ * otherwise it would be impossible for it to have reported success
+ * from futex_ref_is_dead().
+ */
+ WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
+
+ /*
+ * Set the atomic to the bias value such that futex_ref_{get,put}()
+ * will never observe 0. Will be fixed up in __futex_ref_atomic_end()
+ * when folding in the percpu count.
+ */
+ atomic_long_set(&mm->futex_atomic, LONG_MAX);
+ smp_store_release(&fph->state, FR_ATOMIC);
+
+ call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+}
+
+static void __futex_ref_atomic_end(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+ unsigned int count = 0;
+ long ret;
+ int cpu;
+
+ /*
+ * Per __futex_ref_atomic_begin() the state of the fph must be ATOMIC
+ * and per this RCU callback, everybody must now observe this state and
+ * use the atomic variable.
+ */
+ WARN_ON_ONCE(fph->state != FR_ATOMIC);
+
+ /*
+ * Therefore the per-cpu counter is now stable, sum and reset.
+ */
+ for_each_possible_cpu(cpu) {
+ unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
+ count += *ptr;
+ *ptr = 0;
+ }
+
+ /*
+ * Re-init for the next cycle.
+ */
+ this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+
+ /*
+ * Add actual count, subtract bias and initial refcount.
+ *
+ * The moment this atomic operation happens, futex_ref_is_dead() can
+ * become true.
+ */
+ ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
+ if (!ret)
+ wake_up_var(mm);
+
+ WARN_ON_ONCE(ret < 0);
+}
+
+static void futex_ref_rcu(struct rcu_head *head)
+{
+ struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
+ struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
+
+ if (fph->state == FR_PERCPU) {
+ /*
+ * Per this extra grace-period, everybody must now observe
+ * fph as the current fph and no previously observed fph's
+ * are in-flight.
+ *
+ * Notably, nobody will now rely on the atomic
+ * futex_ref_is_dead() state anymore so we can begin the
+ * migration of the per-cpu counter into the atomic.
+ */
+ __futex_ref_atomic_begin(fph);
+ return;
+ }
+
+ __futex_ref_atomic_end(fph);
+}
+
+/*
+ * Drop the initial refcount and transition to atomics.
+ */
+static void futex_ref_drop(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+
+ /*
+ * Can only transition the current fph;
+ */
+ WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
+
+ /*
+ * In order to avoid the following scenario:
+ *
+ * futex_hash() __futex_pivot_hash()
+ * guard(rcu); guard(mm->futex_hash_lock);
+ * fph = mm->futex_phash;
+ * rcu_assign_pointer(&mm->futex_phash, new);
+ * futex_hash_allocate()
+ * futex_ref_drop()
+ * fph->state = FR_ATOMIC;
+ * atomic_set(, BIAS);
+ *
+ * futex_private_hash_get(fph); // OOPS
+ *
+ * Where an old fph (which is FR_ATOMIC) and should fail on
+ * inc_not_zero, will succeed because a new transition is started and
+ * the atomic is bias'ed away from 0.
+ *
+ * There must be at least one full grace-period between publishing a
+ * new fph and trying to replace it.
+ */
+ if (poll_state_synchronize_rcu(mm->futex_batches)) {
+ /*
+ * There was a grace-period, we can begin now.
+ */
+ __futex_ref_atomic_begin(fph);
+ return;
+ }
+
+ call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+}
+
+static bool futex_ref_get(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+
+ guard(rcu)();
+
+ if (smp_load_acquire(&fph->state) == FR_PERCPU) {
+ this_cpu_inc(*mm->futex_ref);
+ return true;
+ }
+
+ return atomic_long_inc_not_zero(&mm->futex_atomic);
+}
+
+static bool futex_ref_put(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+
+ guard(rcu)();
+
+ if (smp_load_acquire(&fph->state) == FR_PERCPU) {
+ this_cpu_dec(*mm->futex_ref);
+ return false;
+ }
+
+ return atomic_long_dec_and_test(&mm->futex_atomic);
+}
+
+static bool futex_ref_is_dead(struct futex_private_hash *fph)
+{
+ struct mm_struct *mm = fph->mm;
+
+ guard(rcu)();
+
+ if (smp_load_acquire(&fph->state) == FR_PERCPU)
+ return false;
+
+ return atomic_long_read(&mm->futex_atomic) == 0;
+}
+
+int futex_mm_init(struct mm_struct *mm)
+{
+ mutex_init(&mm->futex_hash_lock);
+ RCU_INIT_POINTER(mm->futex_phash, NULL);
+ mm->futex_phash_new = NULL;
+ /* futex-ref */
+ atomic_long_set(&mm->futex_atomic, 0);
+ mm->futex_batches = get_state_synchronize_rcu();
+ mm->futex_ref = alloc_percpu(unsigned int);
+ if (!mm->futex_ref)
+ return -ENOMEM;
+ this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+ return 0;
+}
+
void futex_hash_free(struct mm_struct *mm)
{
struct futex_private_hash *fph;
+ free_percpu(mm->futex_ref);
kvfree(mm->futex_phash_new);
fph = rcu_dereference_raw(mm->futex_phash);
- if (fph) {
- WARN_ON_ONCE(rcuref_read(&fph->users) > 1);
+ if (fph)
kvfree(fph);
- }
}
static bool futex_pivot_pending(struct mm_struct *mm)
@@ -1549,7 +1752,7 @@ static bool futex_pivot_pending(struct mm_struct *mm)
return true;
fph = rcu_dereference(mm->futex_phash);
- return rcuref_is_dead(&fph->users);
+ return futex_ref_is_dead(fph);
}
static bool futex_hash_less(struct futex_private_hash *a,
@@ -1598,11 +1801,11 @@ static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
}
}
- fph = kvzalloc(struct_size(fph, queues, hash_slots), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ fph = kvzalloc(struct_size(fph, queues, hash_slots),
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!fph)
return -ENOMEM;
- rcuref_init(&fph->users, 1);
fph->hash_mask = hash_slots ? hash_slots - 1 : 0;
fph->custom = custom;
fph->immutable = !!(flags & FH_IMMUTABLE);
@@ -1645,7 +1848,7 @@ static int futex_hash_allocate(unsigned int hash_slots, unsigned int flags)
* allocated a replacement hash, drop the initial
* reference on the existing hash.
*/
- futex_private_hash_put(cur);
+ futex_ref_drop(cur);
}
if (new) {
--
2.50.0
On 7/7/25 10:36 AM, Sebastian Andrzej Siewior wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> The use of rcuref_t for reference counting introduces a performance bottleneck
> when accessed concurrently by multiple threads during futex operations.
…
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

This looks somewhat like what the percpu refcount does (see
lib/percpu-refcount.c). Could this be used instead of reinventing the wheel
again?

Cheers,
Longman
On 2025-07-08 09:43:44 [-0400], Waiman Long wrote:
> This looks somewhat like what the percpu refcount does (see
> lib/percpu-refcount.c). Could this be used instead of reinventing the wheel
> again?

From the comment:

 * futex-ref
 *
 * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
 * code because it just doesn't fit right.
 *
 * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
 * re-initializes the state automatically, such that the fph swizzle is also a
 * transition back to per-cpu.

but I leave it up to Peter if he considers merging that.

> Cheers,
> Longman

Sebastian
On Tue, Jul 08, 2025 at 03:47:08PM +0200, Sebastian Andrzej Siewior wrote:
> On 2025-07-08 09:43:44 [-0400], Waiman Long wrote:
> > This looks somewhat like what the percpu refcount does (see
> > lib/percpu-refcount.c). Could this be used instead of reinventing the wheel
> > again?
>
> From the comment:
>
>  * futex-ref
>  *
>  * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
>  * code because it just doesn't fit right.
>  *
>  * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
>  * re-initializes the state automatically, such that the fph swizzle is also a
>  * transition back to per-cpu.
>
> but I leave it up to Peter if he considers merging that.

Basically what the comment says. Trying to reuse things ended up in a
mess. It really isn't much code, most of it is comments.
On 7/8/25 3:06 PM, Peter Zijlstra wrote:
> On Tue, Jul 08, 2025 at 03:47:08PM +0200, Sebastian Andrzej Siewior wrote:
>> On 2025-07-08 09:43:44 [-0400], Waiman Long wrote:
>>> This looks somewhat like what the percpu refcount does (see
>>> lib/percpu-refcount.c). Could this be used instead of reinventing the wheel
>>> again?
>> From the comment:
>>
>>  * futex-ref
>>  *
>>  * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
>>  * code because it just doesn't fit right.
>>  *
>>  * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
>>  * re-initializes the state automatically, such that the fph swizzle is also a
>>  * transition back to per-cpu.
>>
>> but I leave it up to Peter if he considers merging that.
> Basically what the comment says. Trying to reuse things ended up in a
> mess. It really isn't much code, most of it is comments.
>
I got it now. I am not against adding a variant specific to this code
given that we want to fix the performance regression ASAP. Merging it
into any existing set of helpers may be something we want to do in the
future.

Cheers,
Longman
On 2025-07-07 16:36:22 [+0200], To linux-kernel@vger.kernel.org wrote:

so a box was doing innocent things and then this happened:

| slab mm_struct start ffff888549a50580 pointer offset 280 size 1352
| BUG: kernel NULL pointer dereference, address: 0000000000000000
| #PF: supervisor instruction fetch in kernel mode
| #PF: error_code(0x0010) - not-present page
| PGD 0 P4D 0
| Oops: Oops: 0010 [#1] SMP
| CPU: 11 UID: 1001 PID: 125007 Comm: clang Not tainted 6.16.0-rc5+ #262 PREEMPT(lazy) 3bf8bc6327fe388c2a27e778516b456f280aa854
| Hardware name: Intel Corporation S2600CP/S2600CP, BIOS SE5C600.86B.02.03.0003.041920141333 04/19/2014
| RIP: 0010:0x0
| Code: Unable to access opcode bytes at 0xffffffffffffffd6.
| RSP: 0000:ffffc90020317e60 EFLAGS: 00010282
| RAX: 0000000000000001 RBX: 0000000000000006 RCX: 0000000000000000
| RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff888549a50698
| RBP: ffff888a3faeab00 R08: 0000000000000000 R09: ffffc90020317bc8
| R10: ffffffff8296bdc8 R11: 0000000000000003 R12: ffff8881b6f80000
| R13: ffffc90020317e98 R14: 0000000000000005 R15: 0000000000000000
| FS: 00007fd37b766c40(0000) GS:ffff888abc9ef000(0000) knlGS:0000000000000000
| CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
| CR2: ffffffffffffffd6 CR3: 00000005c3d5f001 CR4: 00000000000626f0
| Call Trace:
|  <TASK>
|  rcu_core+0x27c/0x720
|  ? rcu_core+0x21c/0x720
|  handle_softirqs+0xc5/0x260
|  irq_exit_rcu+0x85/0xa0
|  sysvec_apic_timer_interrupt+0x3d/0x90
|  asm_sysvec_apic_timer_interrupt+0x1a/0x20
| RIP: 0033:0x7fd3862e1190
| Code: 41 56 53 48 89 d3 49 89 f6 49 89 ff 48 c7 47 08 00 00 00 00 8b 47 10 48 85 c0 74 52 49 8b 0f 48 c1 e0 04 31 d2 0f 1f 44 00 00 <48> c7 04 11 00 f0 ff ff 48 83 c2 10 48 39 d0 75 ef eb 31 4d 85 c9
| RSP: 002b:00007fff1e519aa0 EFLAGS: 00010202
| RAX: 0000000000040000 RBX: 00007fd37b71f010 RCX: 00007fd37b5c3010
| RDX: 000000000000fff0 RSI: 00007fd37b6ff010 RDI: 000055836a11e290
| RBP: 0000000000150050 R08: 00000000ffffffff R09: 0000000000000000
| R10: 0000000000000022 R11: 0000000000000246 R12: 000055836a597420
| R13: 000055836a593030 R14: 00007fd37b6ff010 R15: 000055836a11e290
|  </TASK>
| Modules linked in:
| Dumping ftrace buffer:
…
| CR2: 0000000000000000
| ---[ end trace 0000000000000000 ]---
| RIP: 0010:0x0
| Code: Unable to access opcode bytes at 0xffffffffffffffd6.
| RSP: 0000:ffffc90020317e60 EFLAGS: 00010282
| RAX: 0000000000000001 RBX: 0000000000000006 RCX: 0000000000000000
| RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff888549a50698

on the plus side there is no evidence that this could be futex related :)
However, I was wondering could this be because nothing ensures that the
mm stays around after the RCU callback has been enqueued. What about this:

diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index b13474825130f..2201da0afecc5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
 /* same as above but performs the slow path from the async context. Can
  * be called from the atomic context as well
  */
diff --git a/kernel/fork.c b/kernel/fork.c
index 66c4d4cc2340b..0b885dcbde9af 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1149,7 +1149,7 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
 static void mmput_async_fn(struct work_struct *work)
 {
 	struct mm_struct *mm = container_of(work, struct mm_struct,
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index d1877abbb7147..cd8463f3d1026 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -1602,6 +1602,7 @@ static void __futex_ref_atomic_end(struct futex_private_hash *fph)
 		wake_up_var(mm);
 
 	WARN_ON_ONCE(ret < 0);
+	mmput_async(mm);
 }
 
 static void futex_ref_rcu(struct rcu_head *head)
@@ -1637,6 +1638,11 @@ static void futex_ref_drop(struct futex_private_hash *fph)
 	 * Can only transition the current fph;
 	 */
 	WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
+	/*
+	 * We enqueue at least one RCU callback. Ensure mm stays if the task
+	 * exits before the transition is completed.
+	 */
+	mmget(mm);
 
 	/*
 	 * In order to avoid the following scenario:
-- 
2.50.0

Sebastian
On Mon, 7 Jul 2025 16:36:22 +0200 Sebastian Andrzej Siewior wrote:
> +static bool futex_ref_get(struct futex_private_hash *fph)
> +{
> +	struct mm_struct *mm = fph->mm;
> +
> +	guard(rcu)();
> +
Like regular refcount_t, it is buggy to touch fph if futex_atomic drops
to 0. And more important guard(rcu) does not prevent it from dropping to 0.

> +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> +		this_cpu_inc(*mm->futex_ref);
> +		return true;
> +	}
> +
> +	return atomic_long_inc_not_zero(&mm->futex_atomic);
> +}
> +
> +static bool futex_ref_put(struct futex_private_hash *fph)
> +{
> +	struct mm_struct *mm = fph->mm;
> +
> +	guard(rcu)();
> +
> +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> +		this_cpu_dec(*mm->futex_ref);
> +		return false;
> +	}
> +
> +	return atomic_long_dec_and_test(&mm->futex_atomic);
> +}
On 2025-07-08 16:56:39 [+0800], Hillf Danton wrote:
> On Mon, 7 Jul 2025 16:36:22 +0200 Sebastian Andrzej Siewior wrote:
> > +static bool futex_ref_get(struct futex_private_hash *fph)
> > +{
> > +	struct mm_struct *mm = fph->mm;
> > +
> > +	guard(rcu)();
> > +
> Like regular refcount_t, it is buggy to touch fph if futex_atomic drops
> to 0. And more important guard(rcu) does not prevent it from dropping to 0.

What is your intention with this? There is an inc-if-not-zero to ensure
this does not happen. And it has to drop to zero in order to get
replaced.

> > +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> > +		this_cpu_inc(*mm->futex_ref);
> > +		return true;
> > +	}
> > +
> > +	return atomic_long_inc_not_zero(&mm->futex_atomic);
> > +}
> > +
> > +static bool futex_ref_put(struct futex_private_hash *fph)
> > +{
> > +	struct mm_struct *mm = fph->mm;
> > +
> > +	guard(rcu)();
> > +
> > +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> > +		this_cpu_dec(*mm->futex_ref);
> > +		return false;
> > +	}
> > +
> > +	return atomic_long_dec_and_test(&mm->futex_atomic);
> > +}

Sebastian
On Tue, 8 Jul 2025 11:16:26 +0200 Sebastian Andrzej Siewior wrote:
> On 2025-07-08 16:56:39 [+0800], Hillf Danton wrote:
> > On Mon, 7 Jul 2025 16:36:22 +0200 Sebastian Andrzej Siewior wrote:
> > > +static bool futex_ref_get(struct futex_private_hash *fph)
> > > +{
> > > +	struct mm_struct *mm = fph->mm;
> > > +
> > > +	guard(rcu)();
> > > +
> > Like regular refcount_t, it is buggy to touch fph if futex_atomic drops
> > to 0. And more important guard(rcu) does not prevent it from dropping to 0.
>
> What is your intention with this? There is an inc-if-not-zero to ensure

I am just simply wondering why get and put do not work without the rcu guard?

> this does not happen. And it has to drop to zero in order to get
> replaced.
>
> > > +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> > > +		this_cpu_inc(*mm->futex_ref);
> > > +		return true;
> > > +	}
> > > +
> > > +	return atomic_long_inc_not_zero(&mm->futex_atomic);
> > > +}
> > > +
> > > +static bool futex_ref_put(struct futex_private_hash *fph)
> > > +{
> > > +	struct mm_struct *mm = fph->mm;
> > > +
> > > +	guard(rcu)();
> > > +
> > > +	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
> > > +		this_cpu_dec(*mm->futex_ref);
> > > +		return false;
> > > +	}
> > > +
> > > +	return atomic_long_dec_and_test(&mm->futex_atomic);
> > > +}
>
> Sebastian
On 2025-07-08 20:01:56 [+0800], Hillf Danton wrote:
> On Tue, 8 Jul 2025 11:16:26 +0200 Sebastian Andrzej Siewior wrote:
> > On 2025-07-08 16:56:39 [+0800], Hillf Danton wrote:
> > > On Mon, 7 Jul 2025 16:36:22 +0200 Sebastian Andrzej Siewior wrote:
> > > > +static bool futex_ref_get(struct futex_private_hash *fph)
> > > > +{
> > > > +	struct mm_struct *mm = fph->mm;
> > > > +
> > > > +	guard(rcu)();
> > > > +
> > > Like regular refcount_t, it is buggy to touch fph if futex_atomic drops
> > > to 0. And more important guard(rcu) does not prevent it from dropping to 0.
> >
> > What is your intention with this? There is an inc-if-not-zero to ensure
>
> I am just simply wondering why get and put do not work without the rcu guard?

To ensure every get/put user within this section observed the switch to
atomics. There is this call-rcu callback which performs the switch. This
one will be invoked after every user that was using the per-CPU counter
is gone and using the atomic one.

Sebastian
On Tue, 8 Jul 2025 15:15:58 +0200 Sebastian Andrzej Siewior wrote:
> On 2025-07-08 20:01:56 [+0800], Hillf Danton wrote:
> > On Tue, 8 Jul 2025 11:16:26 +0200 Sebastian Andrzej Siewior wrote:
> > > On 2025-07-08 16:56:39 [+0800], Hillf Danton wrote:
> > > > On Mon, 7 Jul 2025 16:36:22 +0200 Sebastian Andrzej Siewior wrote:
> > > > > +static bool futex_ref_get(struct futex_private_hash *fph)
> > > > > +{
> > > > > +	struct mm_struct *mm = fph->mm;
> > > > > +
> > > > > +	guard(rcu)();
> > > > > +
> > > > Like regular refcount_t, it is buggy to touch fph if futex_atomic drops
> > > > to 0. And more important guard(rcu) does not prevent it from dropping to 0.
> > >
> > > What is your intention with this? There is an inc-if-not-zero to ensure
> >
> > I am just simply wondering why get and put do not work without the rcu guard?
>
> To ensure every get/put user within this section observed the switch to
> atomics. There is this call-rcu callback which performs the switch. This
> one will be invoked after every user that was using the per-CPU counter
> is gone and using the atomic one.
>
Then percpu refcount sounds like a better option because it is free at least.
On 2025-07-07 16:36:22 [+0200], To linux-kernel@vger.kernel.org wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> The use of rcuref_t for reference counting introduces a performance bottleneck
> when accessed concurrently by multiple threads during futex operations.

just folded this bit after kernel test robot complained:

diff --git a/include/linux/futex.h b/include/linux/futex.h
index cd773febd497b..9e9750f049805 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -113,7 +113,7 @@ static inline int futex_hash_allocate_default(void)
 	return 0;
 }
 static inline int futex_hash_free(struct mm_struct *mm) { return 0; }
-static inline void futex_mm_init(struct mm_struct *mm) { }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
 #endif

Sebastian