[patch V2 13/37] sched: Move MM CID related functions to sched.h

Posted by Thomas Gleixner 1 month, 1 week ago
There is nothing mm specific about these functions, and including mm.h can
cause header recursion hell.
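
[Editorial illustration, not part of the patch: the helpers operate purely on
struct task_struct, which sched.h already defines, and need nothing that mm.h
provides. A minimal stand-alone sketch, using a made-up stand-in for the real
task_struct, shows that the accessor compiles against the task definition
alone:

	/* Stand-alone sketch, not kernel code: the struct below is a made-up
	 * stand-in for the real task_struct defined in sched.h.
	 */
	struct task_struct {
		int mm_cid;	/* per-mm concurrency ID assigned by the scheduler */
	};

	static inline int task_mm_cid(struct task_struct *t)
	{
		/* Only the task pointer is dereferenced; no mm.h types involved. */
		return t->mm_cid;
	}
]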

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/mm.h    |   25 -------------------------
 include/linux/sched.h |   26 ++++++++++++++++++++++++++
 2 files changed, 26 insertions(+), 25 deletions(-)

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2310,31 +2310,6 @@ struct zap_details {
 /* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
 #define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
 
-#ifdef CONFIG_SCHED_MM_CID
-void sched_mm_cid_before_execve(struct task_struct *t);
-void sched_mm_cid_after_execve(struct task_struct *t);
-void sched_mm_cid_fork(struct task_struct *t);
-void sched_mm_cid_exit_signals(struct task_struct *t);
-static inline int task_mm_cid(struct task_struct *t)
-{
-	return t->mm_cid;
-}
-#else
-static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_fork(struct task_struct *t) { }
-static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
-static inline int task_mm_cid(struct task_struct *t)
-{
-	/*
-	 * Use the processor id as a fall-back when the mm cid feature is
-	 * disabled. This provides functional per-cpu data structure accesses
-	 * in user-space, althrough it won't provide the memory usage benefits.
-	 */
-	return raw_smp_processor_id();
-}
-#endif
-
 #ifdef CONFIG_MMU
 extern bool can_do_mlock(void);
 #else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2309,4 +2309,30 @@ static __always_inline void alloc_tag_re
 #define alloc_tag_restore(_tag, _old)		do {} while (0)
 #endif
 
+/* Avoids recursive inclusion hell */
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit_signals(struct task_struct *t);
+static inline int task_mm_cid(struct task_struct *t)
+{
+	return t->mm_cid;
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline int task_mm_cid(struct task_struct *t)
+{
+	/*
+	 * Use the processor id as a fall-back when the mm cid feature is
+	 * disabled. This provides functional per-cpu data structure accesses
+	 * in user-space, althrough it won't provide the memory usage benefits.
+	 */
+	return task_cpu(t);
+}
+#endif
+
 #endif
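
[Editorial sketch, not part of the patch: the fall-back comment in the hunk
above refers to how the concurrency ID reaches user space through the rseq
ABI, where it indexes per-CPU style data structures. The sketch below assumes
glibc >= 2.35 (which registers the rseq area per thread and exports
__rseq_offset/__rseq_size), UAPI headers recent enough to carry the rseq
mm_cid field, and a compiler/architecture providing __builtin_thread_pointer():

	#include <linux/rseq.h>		/* UAPI struct rseq, including mm_cid */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Exported by glibc >= 2.35 once rseq is registered for the thread. */
	extern ptrdiff_t __rseq_offset;
	extern unsigned int __rseq_size;

	static struct rseq *thread_rseq(void)
	{
		/* The rseq area sits at a fixed offset from the thread pointer. */
		return (struct rseq *)((uintptr_t)__builtin_thread_pointer() +
				       __rseq_offset);
	}

	int main(void)
	{
		/*
		 * One slot per concurrency ID. mm_cid is bounded by the number
		 * of possible CPUs; 1024 is just a generous placeholder here.
		 */
		static long per_cid_counter[1024];

		if (__rseq_size == 0) {
			fprintf(stderr, "rseq area not registered by libc\n");
			return 1;
		}

		/* With CONFIG_SCHED_MM_CID this is the compact concurrency ID;
		 * without it the kernel falls back to the CPU number, so the
		 * indexing below keeps working either way. */
		int cid = thread_rseq()->mm_cid;
		per_cid_counter[cid]++;
		printf("concurrency id %d, counter %ld\n", cid, per_cid_counter[cid]);
		return 0;
	}
]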
Re: [patch V2 13/37] sched: Move MM CID related functions to sched.h
Posted by Mathieu Desnoyers 1 month, 1 week ago
On 2025-08-23 12:39, Thomas Gleixner wrote:
> There is nothing mm specific about these functions, and including mm.h can
> cause header recursion hell.

Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>

-- 
Mathieu Desnoyers
EfficiOS Inc.
https://www.efficios.com