[RFC patch v3 10/20] sched: Calculate the number of tasks that have LLC preference on a runqueue

Posted by Tim Chen 3 months, 3 weeks ago
Track, for each runqueue, the number of tasks that have an LLC
preference and how many of those tasks are running in their preferred
LLC.  This is similar to nr_numa_running and nr_preferred_running for
NUMA balancing, and will be used by the cache-aware load balancing in
subsequent patches.
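
For example (illustrative only): if three of the tasks queued on a
runqueue have an LLC preference, and for two of them the preferred LLC
is the LLC of that runqueue's CPU, then nr_llc_running == 3 and
nr_pref_llc_running == 2.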

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 kernel/sched/core.c  | 12 ++++++++++++
 kernel/sched/fair.c  | 42 +++++++++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h |  7 +++++++
 3 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d9c3e75f79d1..34056eb79ef2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -498,6 +498,18 @@ void __trace_set_current_state(int state_value)
 }
 EXPORT_SYMBOL(__trace_set_current_state);
 
+#ifdef CONFIG_SMP
+int task_llc(const struct task_struct *p)
+{
+	return per_cpu(sd_llc_id, task_cpu(p));
+}
+#else
+int task_llc(const struct task_struct *p)
+{
+	return 0;
+}
+#endif
+
 /*
  * Serialization rules:
  *
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cc804a8c7061..88ff47194faa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1195,6 +1195,18 @@ static inline int llc_idx(int cpu)
 	return per_cpu(sd_llc_idx, cpu);
 }
 
+static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
+{
+	rq->nr_llc_running += (p->preferred_llc != -1);
+	rq->nr_pref_llc_running += (p->preferred_llc == task_llc(p));
+}
+
+static void account_llc_dequeue(struct rq *rq, struct task_struct *p)
+{
+	rq->nr_llc_running -= (p->preferred_llc != -1);
+	rq->nr_pref_llc_running -= (p->preferred_llc == task_llc(p));
+}
+
 void mm_init_sched(struct mm_struct *mm, struct mm_sched __percpu *_pcpu_sched)
 {
 	unsigned long epoch;
@@ -1298,8 +1310,11 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
 	if (mm->mm_sched_cpu != -1)
 		mm_sched_llc = per_cpu(sd_llc_id, mm->mm_sched_cpu);
 
-	if (p->preferred_llc != mm_sched_llc)
+	if (p->preferred_llc != mm_sched_llc) {
+		account_llc_dequeue(rq, p);
 		p->preferred_llc = mm_sched_llc;
+		account_llc_enqueue(rq, p);
+	}
 }
 
 static void task_tick_cache(struct rq *rq, struct task_struct *p)
@@ -1400,6 +1415,14 @@ void init_sched_mm(struct task_struct *p)
 	work->next = work;
 }
 
+void reset_llc_stats(struct rq *rq)
+{
+	if (rq->nr_llc_running)
+		rq->nr_llc_running = 0;
+
+	rq->nr_pref_llc_running = 0;
+}
+
 #else
 
 static inline void account_mm_sched(struct rq *rq, struct task_struct *p,
@@ -1410,6 +1433,17 @@ void init_sched_mm(struct task_struct *p) { }
 
 static void task_tick_cache(struct rq *rq, struct task_struct *p) { }
 
+static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void account_llc_dequeue(struct rq *rq, struct task_struct *p)
+{
+}
+
+void reset_llc_stats(struct rq *rq)
+{
+}
 #endif
 
 static inline
@@ -3939,6 +3973,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		struct rq *rq = rq_of(cfs_rq);
 
 		account_numa_enqueue(rq, task_of(se));
+		account_llc_enqueue(rq, task_of(se));
 		list_add(&se->group_node, &rq->cfs_tasks);
 	}
 #endif
@@ -3952,10 +3987,15 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
+		account_llc_dequeue(rq_of(cfs_rq), task_of(se));
 		list_del_init(&se->group_node);
 	}
 #endif
 	cfs_rq->nr_queued--;
+
+	/* safeguard? */
+	if (!parent_entity(se) && !cfs_rq->nr_queued)
+		reset_llc_stats(rq_of(cfs_rq));
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 74eb2f3615aa..6c83a71ac8ca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1104,6 +1104,10 @@ struct rq {
 	unsigned int		nr_preferred_running;
 	unsigned int		numa_migrate_on;
 #endif
+#ifdef CONFIG_SCHED_CACHE
+	unsigned int		nr_pref_llc_running;
+	unsigned int		nr_llc_running;
+#endif
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
 	unsigned long		last_blocked_load_update_tick;
@@ -1948,6 +1952,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 #endif /* !CONFIG_NUMA_BALANCING */
 
+extern void reset_llc_stats(struct rq *rq);
+extern int task_llc(const struct task_struct *p);
+
 #ifdef CONFIG_SMP
 
 static inline void
-- 
2.32.0
Re: [RFC patch v3 10/20] sched: Calculate the number of tasks that have LLC preference on a runqueue
Posted by Shrikanth Hegde 3 months, 1 week ago

On 6/18/25 23:57, Tim Chen wrote:
> Track, for each runqueue, the number of tasks that have an LLC
> preference and how many of those tasks are running in their preferred
> LLC.  This is similar to nr_numa_running and nr_preferred_running for
> NUMA balancing, and will be used by the cache-aware load balancing in
> subsequent patches.
> 
> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
> ---
>   kernel/sched/core.c  | 12 ++++++++++++
>   kernel/sched/fair.c  | 42 +++++++++++++++++++++++++++++++++++++++++-
>   kernel/sched/sched.h |  7 +++++++
>   3 files changed, 60 insertions(+), 1 deletion(-)
> 
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d9c3e75f79d1..34056eb79ef2 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -498,6 +498,18 @@ void __trace_set_current_state(int state_value)
>   }
>   EXPORT_SYMBOL(__trace_set_current_state);
>   
> +#ifdef CONFIG_SMP


CONFIG_SMP is now always true, so the #else branch may need to go.
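
Something like this, perhaps (untested sketch, assuming the
!CONFIG_SMP stub can simply be dropped):

	int task_llc(const struct task_struct *p)
	{
		return per_cpu(sd_llc_id, task_cpu(p));
	}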

> +int task_llc(const struct task_struct *p)
> +{
> +	return per_cpu(sd_llc_id, task_cpu(p));
> +}
> +#else
> +int task_llc(const struct task_struct *p)
> +{
> +	return 0;
> +}
> +#endif
> +
>   /*
>    * Serialization rules:
>    *
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index cc804a8c7061..88ff47194faa 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1195,6 +1195,18 @@ static inline int llc_idx(int cpu)
>   	return per_cpu(sd_llc_idx, cpu);
>   }
>   
> +static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
> +{
> +	rq->nr_llc_running += (p->preferred_llc != -1);
> +	rq->nr_pref_llc_running += (p->preferred_llc == task_llc(p));
> +}
> +
> +static void account_llc_dequeue(struct rq *rq, struct task_struct *p)
> +{
> +	rq->nr_llc_running -= (p->preferred_llc != -1);
> +	rq->nr_pref_llc_running -= (p->preferred_llc == task_llc(p));
> +}
> +
>   void mm_init_sched(struct mm_struct *mm, struct mm_sched __percpu *_pcpu_sched)
>   {
>   	unsigned long epoch;
> @@ -1298,8 +1310,11 @@ void account_mm_sched(struct rq *rq, struct task_struct *p, s64 delta_exec)
>   	if (mm->mm_sched_cpu != -1)
>   		mm_sched_llc = per_cpu(sd_llc_id, mm->mm_sched_cpu);
>   
> -	if (p->preferred_llc != mm_sched_llc)
> +	if (p->preferred_llc != mm_sched_llc) {
> +		account_llc_dequeue(rq, p);
>   		p->preferred_llc = mm_sched_llc;
> +		account_llc_enqueue(rq, p);
> +	}
>   }
>   
>   static void task_tick_cache(struct rq *rq, struct task_struct *p)
> @@ -1400,6 +1415,14 @@ void init_sched_mm(struct task_struct *p)
>   	work->next = work;
>   }
>   
> +void reset_llc_stats(struct rq *rq)
> +{
> +	if (rq->nr_llc_running)
> +		rq->nr_llc_running = 0;
> +
> +	rq->nr_pref_llc_running = 0;
> +}
> +
>   #else
>   
>   static inline void account_mm_sched(struct rq *rq, struct task_struct *p,
> @@ -1410,6 +1433,17 @@ void init_sched_mm(struct task_struct *p) { }
>   
>   static void task_tick_cache(struct rq *rq, struct task_struct *p) { }
>   
> +static void account_llc_enqueue(struct rq *rq, struct task_struct *p)
> +{
> +}
> +
> +static void account_llc_dequeue(struct rq *rq, struct task_struct *p)
> +{
> +}
> +
> +void reset_llc_stats(struct rq *rq)
> +{
> +}
>   #endif
>   
>   static inline
> @@ -3939,6 +3973,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
>   		struct rq *rq = rq_of(cfs_rq);
>   
>   		account_numa_enqueue(rq, task_of(se));
> +		account_llc_enqueue(rq, task_of(se));
>   		list_add(&se->group_node, &rq->cfs_tasks);
>   	}
>   #endif
> @@ -3952,10 +3987,15 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
>   #ifdef CONFIG_SMP
>   	if (entity_is_task(se)) {
>   		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
> +		account_llc_dequeue(rq_of(cfs_rq), task_of(se));
>   		list_del_init(&se->group_node);
>   	}
>   #endif
>   	cfs_rq->nr_queued--;
> +
> +	/* safeguard? */
> +	if (!parent_entity(se) && !cfs_rq->nr_queued)
> +		reset_llc_stats(rq_of(cfs_rq));
>   }
>   
>   /*
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 74eb2f3615aa..6c83a71ac8ca 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1104,6 +1104,10 @@ struct rq {
>   	unsigned int		nr_preferred_running;
>   	unsigned int		numa_migrate_on;
>   #endif
> +#ifdef CONFIG_SCHED_CACHE
> +	unsigned int		nr_pref_llc_running;
> +	unsigned int		nr_llc_running;
> +#endif
>   #ifdef CONFIG_NO_HZ_COMMON
>   #ifdef CONFIG_SMP
>   	unsigned long		last_blocked_load_update_tick;
> @@ -1948,6 +1952,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
>   
>   #endif /* !CONFIG_NUMA_BALANCING */
>   
> +extern void reset_llc_stats(struct rq *rq);
> +extern int task_llc(const struct task_struct *p);
> +
>   #ifdef CONFIG_SMP
>   
>   static inline void
Re: [RFC patch v3 10/20] sched: Calculate the number of tasks that have LLC preference on a runqueue
Posted by Chen, Yu C 3 months ago
On 7/4/2025 3:45 AM, Shrikanth Hegde wrote:
> 
> 
> On 6/18/25 23:57, Tim Chen wrote:
>> Track, for each runqueue, the number of tasks that have an LLC
>> preference and how many of those tasks are running in their preferred
>> LLC.  This is similar to nr_numa_running and nr_preferred_running for
>> NUMA balancing, and will be used by the cache-aware load balancing in
>> subsequent patches.
>>
>> Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
>> ---
>>   kernel/sched/core.c  | 12 ++++++++++++
>>   kernel/sched/fair.c  | 42 +++++++++++++++++++++++++++++++++++++++++-
>>   kernel/sched/sched.h |  7 +++++++
>>   3 files changed, 60 insertions(+), 1 deletion(-)
>>
>> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
>> index d9c3e75f79d1..34056eb79ef2 100644
>> --- a/kernel/sched/core.c
>> +++ b/kernel/sched/core.c
>> @@ -498,6 +498,18 @@ void __trace_set_current_state(int state_value)
>>   }
>>   EXPORT_SYMBOL(__trace_set_current_state);
>> +#ifdef CONFIG_SMP
> 
> 
> CONFIG_SMP is now always true, so the #else branch may need to go.
> 

OK. I suppose that will take effect in v6.17? We can remove this
#ifdef after rebasing to that version.

thanks,
Chenyu