[patch 2/8] futex: Move futex related mm_struct data into a struct

Thomas Gleixner posted 8 patches 3 weeks ago
There is a newer version of this series
[patch 2/8] futex: Move futex related mm_struct data into a struct
Posted by Thomas Gleixner 3 weeks ago
Having all these members in mm_struct along with the required #ifdeffery is
annoying, does not allow efficiently initializing the data with
memset(), and makes extending it tedious.

Move it into a data structure and fix up all usage sites.

Signed-off-by: Thomas Gleixner <tglx@kernel.org>
---
 include/linux/futex_types.h |   22 +++++++
 include/linux/mm_types.h    |   11 ---
 kernel/futex/core.c         |  123 ++++++++++++++++++++------------------------
 3 files changed, 80 insertions(+), 76 deletions(-)

--- a/include/linux/futex_types.h
+++ b/include/linux/futex_types.h
@@ -31,4 +31,26 @@ struct futex_ctrl {
 struct futex_ctrl { };
 #endif /* !CONFIG_FUTEX */
 
+/**
+ * struct futex_mm_data - Futex related per MM data
+ * @phash_lock:		Mutex to protect the private hash operations
+ * @phash:		RCU managed pointer to the private hash
+ * @phash_new:		Pointer to a newly allocated private hash
+ * @phash_batches:	Batch state for RCU synchronization
+ * @phash_rcu:		RCU head for call_rcu()
+ * @phash_atomic:	Aggregate value for @phash_ref
+ * @phash_ref:		Per CPU reference counter for a private hash
+ */
+struct futex_mm_data {
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+	struct mutex			phash_lock;
+	struct futex_private_hash	__rcu *phash;
+	struct futex_private_hash	*phash_new;
+	unsigned long			phash_batches;
+	struct rcu_head			phash_rcu;
+	atomic_long_t			phash_atomic;
+	unsigned int			__percpu *phash_ref;
+#endif
+};
+
 #endif /* _LINUX_FUTEX_TYPES_H */
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1221,16 +1221,7 @@ struct mm_struct {
 		 */
 		seqcount_t mm_lock_seq;
 #endif
-#ifdef CONFIG_FUTEX_PRIVATE_HASH
-		struct mutex			futex_hash_lock;
-		struct futex_private_hash	__rcu *futex_phash;
-		struct futex_private_hash	*futex_phash_new;
-		/* futex-ref */
-		unsigned long			futex_batches;
-		struct rcu_head			futex_rcu;
-		atomic_long_t			futex_atomic;
-		unsigned int			__percpu *futex_ref;
-#endif
+		struct futex_mm_data	futex;
 
 		unsigned long hiwater_rss; /* High-watermark of RSS usage */
 		unsigned long hiwater_vm;  /* High-water virtual memory usage */
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -188,13 +188,13 @@ static struct futex_hash_bucket *
 		return NULL;
 
 	if (!fph)
-		fph = rcu_dereference(key->private.mm->futex_phash);
+		fph = rcu_dereference(key->private.mm->futex.phash);
 	if (!fph || !fph->hash_mask)
 		return NULL;
 
-	hash = jhash2((void *)&key->private.address,
-		      sizeof(key->private.address) / 4,
+	hash = jhash2((void *)&key->private.address, sizeof(key->private.address) / 4,
 		      key->both.offset);
+
 	return &fph->queues[hash & fph->hash_mask];
 }
 
@@ -238,13 +238,12 @@ static bool __futex_pivot_hash(struct mm
 {
 	struct futex_private_hash *fph;
 
-	WARN_ON_ONCE(mm->futex_phash_new);
+	WARN_ON_ONCE(mm->futex.phash_new);
 
-	fph = rcu_dereference_protected(mm->futex_phash,
-					lockdep_is_held(&mm->futex_hash_lock));
+	fph = rcu_dereference_protected(mm->futex.phash, lockdep_is_held(&mm->futex.phash_lock));
 	if (fph) {
 		if (!futex_ref_is_dead(fph)) {
-			mm->futex_phash_new = new;
+			mm->futex.phash_new = new;
 			return false;
 		}
 
@@ -252,8 +251,8 @@ static bool __futex_pivot_hash(struct mm
 	}
 	new->state = FR_PERCPU;
 	scoped_guard(rcu) {
-		mm->futex_batches = get_state_synchronize_rcu();
-		rcu_assign_pointer(mm->futex_phash, new);
+		mm->futex.phash_batches = get_state_synchronize_rcu();
+		rcu_assign_pointer(mm->futex.phash, new);
 	}
 	kvfree_rcu(fph, rcu);
 	return true;
@@ -261,12 +260,12 @@ static bool __futex_pivot_hash(struct mm
 
 static void futex_pivot_hash(struct mm_struct *mm)
 {
-	scoped_guard(mutex, &mm->futex_hash_lock) {
+	scoped_guard(mutex, &mm->futex.phash_lock) {
 		struct futex_private_hash *fph;
 
-		fph = mm->futex_phash_new;
+		fph = mm->futex.phash_new;
 		if (fph) {
-			mm->futex_phash_new = NULL;
+			mm->futex.phash_new = NULL;
 			__futex_pivot_hash(mm, fph);
 		}
 	}
@@ -289,7 +288,7 @@ struct futex_private_hash *futex_private
 	scoped_guard(rcu) {
 		struct futex_private_hash *fph;
 
-		fph = rcu_dereference(mm->futex_phash);
+		fph = rcu_dereference(mm->futex.phash);
 		if (!fph)
 			return NULL;
 
@@ -412,8 +411,7 @@ static int futex_mpol(struct mm_struct *
  * private hash) is returned if existing. Otherwise a hash bucket from the
  * global hash is returned.
  */
-static struct futex_hash_bucket *
-__futex_hash(union futex_key *key, struct futex_private_hash *fph)
+static struct futex_hash_bucket *__futex_hash(union futex_key *key, struct futex_private_hash *fph)
 {
 	int node = key->both.node;
 	u32 hash;
@@ -426,8 +424,7 @@ static struct futex_hash_bucket *
 			return hb;
 	}
 
-	hash = jhash2((u32 *)key,
-		      offsetof(typeof(*key), both.offset) / sizeof(u32),
+	hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / sizeof(u32),
 		      key->both.offset);
 
 	if (node == FUTEX_NO_NODE) {
@@ -442,8 +439,7 @@ static struct futex_hash_bucket *
 		 */
 		node = (hash >> futex_hashshift) % nr_node_ids;
 		if (!node_possible(node)) {
-			node = find_next_bit_wrap(node_possible_map.bits,
-						  nr_node_ids, node);
+			node = find_next_bit_wrap(node_possible_map.bits, nr_node_ids, node);
 		}
 	}
 
@@ -460,9 +456,8 @@ static struct futex_hash_bucket *
  * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
  *	   value given
  */
-struct hrtimer_sleeper *
-futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
-		  int flags, u64 range_ns)
+struct hrtimer_sleeper *futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
+					  int flags, u64 range_ns)
 {
 	if (!time)
 		return NULL;
@@ -1551,17 +1546,17 @@ static void __futex_ref_atomic_begin(str
 	 * otherwise it would be impossible for it to have reported success
 	 * from futex_ref_is_dead().
 	 */
-	WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
+	WARN_ON_ONCE(atomic_long_read(&mm->futex.phash_atomic) != 0);
 
 	/*
 	 * Set the atomic to the bias value such that futex_ref_{get,put}()
 	 * will never observe 0. Will be fixed up in __futex_ref_atomic_end()
 	 * when folding in the percpu count.
 	 */
-	atomic_long_set(&mm->futex_atomic, LONG_MAX);
+	atomic_long_set(&mm->futex.phash_atomic, LONG_MAX);
 	smp_store_release(&fph->state, FR_ATOMIC);
 
-	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+	call_rcu_hurry(&mm->futex.phash_rcu, futex_ref_rcu);
 }
 
 static void __futex_ref_atomic_end(struct futex_private_hash *fph)
@@ -1582,7 +1577,7 @@ static void __futex_ref_atomic_end(struc
 	 * Therefore the per-cpu counter is now stable, sum and reset.
 	 */
 	for_each_possible_cpu(cpu) {
-		unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
+		unsigned int *ptr = per_cpu_ptr(mm->futex.phash_ref, cpu);
 		count += *ptr;
 		*ptr = 0;
 	}
@@ -1590,7 +1585,7 @@ static void __futex_ref_atomic_end(struc
 	/*
 	 * Re-init for the next cycle.
 	 */
-	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+	this_cpu_inc(*mm->futex.phash_ref); /* 0 -> 1 */
 
 	/*
 	 * Add actual count, subtract bias and initial refcount.
@@ -1598,7 +1593,7 @@ static void __futex_ref_atomic_end(struc
 	 * The moment this atomic operation happens, futex_ref_is_dead() can
 	 * become true.
 	 */
-	ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
+	ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex.phash_atomic);
 	if (!ret)
 		wake_up_var(mm);
 
@@ -1608,8 +1603,8 @@ static void __futex_ref_atomic_end(struc
 
 static void futex_ref_rcu(struct rcu_head *head)
 {
-	struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
-	struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);
+	struct mm_struct *mm = container_of(head, struct mm_struct, futex.phash_rcu);
+	struct futex_private_hash *fph = rcu_dereference_raw(mm->futex.phash);
 
 	if (fph->state == FR_PERCPU) {
 		/*
@@ -1638,7 +1633,7 @@ static void futex_ref_drop(struct futex_
 	/*
 	 * Can only transition the current fph;
 	 */
-	WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);
+	WARN_ON_ONCE(rcu_dereference_raw(mm->futex.phash) != fph);
 	/*
 	 * We enqueue at least one RCU callback. Ensure mm stays if the task
 	 * exits before the transition is completed.
@@ -1650,8 +1645,8 @@ static void futex_ref_drop(struct futex_
 	 *
 	 * futex_hash()			__futex_pivot_hash()
-	 *   guard(rcu);		  guard(mm->futex_hash_lock);
+	 *   guard(rcu);		  guard(mm->futex.phash_lock);
-	 *   fph = mm->futex_phash;
-	 *				  rcu_assign_pointer(&mm->futex_phash, new);
+	 *   fph = mm->futex.phash;
+	 *				  rcu_assign_pointer(&mm->futex.phash, new);
 	 *				futex_hash_allocate()
 	 *				  futex_ref_drop()
 	 *				    fph->state = FR_ATOMIC;
@@ -1666,7 +1661,7 @@ static void futex_ref_drop(struct futex_
 	 * There must be at least one full grace-period between publishing a
 	 * new fph and trying to replace it.
 	 */
-	if (poll_state_synchronize_rcu(mm->futex_batches)) {
+	if (poll_state_synchronize_rcu(mm->futex.phash_batches)) {
 		/*
 		 * There was a grace-period, we can begin now.
 		 */
@@ -1674,7 +1669,7 @@ static void futex_ref_drop(struct futex_
 		return;
 	}
 
-	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
+	call_rcu_hurry(&mm->futex.phash_rcu, futex_ref_rcu);
 }
 
 static bool futex_ref_get(struct futex_private_hash *fph)
@@ -1684,11 +1679,11 @@ static bool futex_ref_get(struct futex_p
 	guard(preempt)();
 
 	if (READ_ONCE(fph->state) == FR_PERCPU) {
-		__this_cpu_inc(*mm->futex_ref);
+		__this_cpu_inc(*mm->futex.phash_ref);
 		return true;
 	}
 
-	return atomic_long_inc_not_zero(&mm->futex_atomic);
+	return atomic_long_inc_not_zero(&mm->futex.phash_atomic);
 }
 
 static bool futex_ref_put(struct futex_private_hash *fph)
@@ -1698,11 +1693,11 @@ static bool futex_ref_put(struct futex_p
 	guard(preempt)();
 
 	if (READ_ONCE(fph->state) == FR_PERCPU) {
-		__this_cpu_dec(*mm->futex_ref);
+		__this_cpu_dec(*mm->futex.phash_ref);
 		return false;
 	}
 
-	return atomic_long_dec_and_test(&mm->futex_atomic);
+	return atomic_long_dec_and_test(&mm->futex.phash_atomic);
 }
 
 static bool futex_ref_is_dead(struct futex_private_hash *fph)
@@ -1714,18 +1709,14 @@ static bool futex_ref_is_dead(struct fut
 	if (smp_load_acquire(&fph->state) == FR_PERCPU)
 		return false;
 
-	return atomic_long_read(&mm->futex_atomic) == 0;
+	return atomic_long_read(&mm->futex.phash_atomic) == 0;
 }
 
 int futex_mm_init(struct mm_struct *mm)
 {
-	mutex_init(&mm->futex_hash_lock);
-	RCU_INIT_POINTER(mm->futex_phash, NULL);
-	mm->futex_phash_new = NULL;
-	/* futex-ref */
-	mm->futex_ref = NULL;
-	atomic_long_set(&mm->futex_atomic, 0);
-	mm->futex_batches = get_state_synchronize_rcu();
+	memset(&mm->futex, 0, sizeof(mm->futex));
+	mutex_init(&mm->futex.phash_lock);
+	mm->futex.phash_batches = get_state_synchronize_rcu();
 	return 0;
 }
 
@@ -1733,9 +1724,9 @@ void futex_hash_free(struct mm_struct *m
 {
 	struct futex_private_hash *fph;
 
-	free_percpu(mm->futex_ref);
-	kvfree(mm->futex_phash_new);
-	fph = rcu_dereference_raw(mm->futex_phash);
+	free_percpu(mm->futex.phash_ref);
+	kvfree(mm->futex.phash_new);
+	fph = rcu_dereference_raw(mm->futex.phash);
 	if (fph)
 		kvfree(fph);
 }
@@ -1746,10 +1737,10 @@ static bool futex_pivot_pending(struct m
 
 	guard(rcu)();
 
-	if (!mm->futex_phash_new)
+	if (!mm->futex.phash_new)
 		return true;
 
-	fph = rcu_dereference(mm->futex_phash);
+	fph = rcu_dereference(mm->futex.phash);
 	return futex_ref_is_dead(fph);
 }
 
@@ -1791,7 +1782,7 @@ static int futex_hash_allocate(unsigned
 	 * Once we've disabled the global hash there is no way back.
 	 */
 	scoped_guard(rcu) {
-		fph = rcu_dereference(mm->futex_phash);
+		fph = rcu_dereference(mm->futex.phash);
 		if (fph && !fph->hash_mask) {
 			if (custom)
 				return -EBUSY;
@@ -1799,15 +1790,15 @@ static int futex_hash_allocate(unsigned
 		}
 	}
 
-	if (!mm->futex_ref) {
+	if (!mm->futex.phash_ref) {
 		/*
 		 * This will always be allocated by the first thread and
 		 * therefore requires no locking.
 		 */
-		mm->futex_ref = alloc_percpu(unsigned int);
-		if (!mm->futex_ref)
+		mm->futex.phash_ref = alloc_percpu(unsigned int);
+		if (!mm->futex.phash_ref)
 			return -ENOMEM;
-		this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
+		this_cpu_inc(*mm->futex.phash_ref); /* 0 -> 1 */
 	}
 
 	fph = kvzalloc(struct_size(fph, queues, hash_slots),
@@ -1830,14 +1821,14 @@ static int futex_hash_allocate(unsigned
 		wait_var_event(mm, futex_pivot_pending(mm));
 	}
 
-	scoped_guard(mutex, &mm->futex_hash_lock) {
+	scoped_guard(mutex, &mm->futex.phash_lock) {
 		struct futex_private_hash *free __free(kvfree) = NULL;
 		struct futex_private_hash *cur, *new;
 
-		cur = rcu_dereference_protected(mm->futex_phash,
-						lockdep_is_held(&mm->futex_hash_lock));
-		new = mm->futex_phash_new;
-		mm->futex_phash_new = NULL;
+		cur = rcu_dereference_protected(mm->futex.phash,
+						lockdep_is_held(&mm->futex.phash_lock));
+		new = mm->futex.phash_new;
+		mm->futex.phash_new = NULL;
 
 		if (fph) {
 			if (cur && !cur->hash_mask) {
@@ -1847,7 +1838,7 @@ static int futex_hash_allocate(unsigned
 				 * the second one returns here.
 				 */
 				free = fph;
-				mm->futex_phash_new = new;
+				mm->futex.phash_new = new;
 				return -EBUSY;
 			}
 			if (cur && !new) {
@@ -1877,7 +1868,7 @@ static int futex_hash_allocate(unsigned
 
 		if (new) {
 			/*
-			 * Will set mm->futex_phash_new on failure;
+			 * Will set mm->futex.phash_new on failure;
 			 * futex_private_hash_get() will try again.
 			 */
 			if (!__futex_pivot_hash(mm, new) && custom)
@@ -1900,7 +1891,7 @@ int futex_hash_allocate_default(void)
 				get_nr_threads(current),
 				num_online_cpus());
 
-		fph = rcu_dereference(current->mm->futex_phash);
+		fph = rcu_dereference(current->mm->futex.phash);
 		if (fph) {
 			if (fph->custom)
 				return 0;
@@ -1927,7 +1918,7 @@ static int futex_hash_get_slots(void)
 	struct futex_private_hash *fph;
 
 	guard(rcu)();
-	fph = rcu_dereference(current->mm->futex_phash);
+	fph = rcu_dereference(current->mm->futex.phash);
 	if (fph && fph->hash_mask)
 		return fph->hash_mask + 1;
 	return 0;
Re: [patch 2/8] futex: Move futex related mm_struct data into a struct
Posted by Mathieu Desnoyers 3 weeks ago
On 2026-03-16 13:13, Thomas Gleixner wrote:
> Having all these members in mm_struct along with the required #ifdeffery is
> annoying, does not allow efficient initializing of the data with
> memset() and makes extending it tedious.
> 
> Move it into a data structure and fix up all usage sites.
> 
> Signed-off-by: Thomas Gleixner <tglx@kernel.org>
> ---
>   include/linux/futex_types.h |   22 +++++++
>   include/linux/mm_types.h    |   11 ---
>   kernel/futex/core.c         |  123 ++++++++++++++++++++------------------------
>   3 files changed, 80 insertions(+), 76 deletions(-)
> 
> --- a/include/linux/futex_types.h
> +++ b/include/linux/futex_types.h
> @@ -31,4 +31,26 @@ struct futex_ctrl {
>   struct futex_ctrl { };
>   #endif /* !CONFIG_FUTEX */
>   
> +/**
> + * struct futex_mm_data - Futex related per MM data
> + * @phash_lock:		Mutex to protect the private hash operations
> + * @phash:		RCU managed pointer to the private hash
> + * @phash_new:		Pointer to a newly allocated private hash
> + * @phash_batches:	Batch state for RCU synchronization
> + * @phash_rcu:		RCU head for call_rcu()
> + * @phash_atomic:	Aggregate value for @phash_ref
> + * @phash_ref:		Per CPU reference counter for a private hash
> + */
> +struct futex_mm_data {
> +#ifdef CONFIG_FUTEX_PRIVATE_HASH
> +	struct mutex			phash_lock;
> +	struct futex_private_hash	__rcu *phash;
> +	struct futex_private_hash	*phash_new;
> +	unsigned long			phash_batches;
> +	struct rcu_head			phash_rcu;
> +	atomic_long_t			phash_atomic;
> +	unsigned int			__percpu *phash_ref;
> +#endif
> +};

In the previous patch you use the approach of declaring an empty
structure within the #else, and here the content of the struct is
conditional. Preferably pick one approach?

Other than this minor nit:

Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

-- 
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com