From: Chen Yu <yu.c.chen@intel.com>
Debug patch only.
The user leverages this trace event (via bpftrace, etc.) to monitor the
cache-aware load balance activity - whether tasks are moved to their
preferred LLC, or moved out of their preferred LLC.
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
include/trace/events/sched.h | 31 +++++++++++++++++++++++++++++++
kernel/sched/fair.c | 10 ++++++++++
2 files changed, 41 insertions(+)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b50e78..bd03f49f7e3c 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -10,6 +10,37 @@
#include <linux/tracepoint.h>
#include <linux/binfmts.h>
+TRACE_EVENT(sched_attach_task,
+
+ TP_PROTO(struct task_struct *t, int pref_cpu, int pref_llc,
+ int attach_cpu, int attach_llc),
+
+ TP_ARGS(t, pref_cpu, pref_llc, attach_cpu, attach_llc),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ __field( pid_t, pid )
+ __field( int, pref_cpu )
+ __field( int, pref_llc )
+ __field( int, attach_cpu )
+ __field( int, attach_llc )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
+ __entry->pid = t->pid;
+ __entry->pref_cpu = pref_cpu;
+ __entry->pref_llc = pref_llc;
+ __entry->attach_cpu = attach_cpu;
+ __entry->attach_llc = attach_llc;
+ ),
+
+ TP_printk("comm=%s pid=%d pref_cpu=%d pref_llc=%d attach_cpu=%d attach_llc=%d",
+ __entry->comm, __entry->pid,
+ __entry->pref_cpu, __entry->pref_llc,
+ __entry->attach_cpu, __entry->attach_llc)
+);
+
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 742e455b093e..e47b4096f0a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10487,6 +10487,16 @@ static void attach_task(struct rq *rq, struct task_struct *p)
{
lockdep_assert_rq_held(rq);
+#ifdef CONFIG_SCHED_CACHE
+ if (p->mm) {
+ int pref_cpu = p->mm->mm_sched_cpu;
+
+ trace_sched_attach_task(p,
+ pref_cpu,
+ pref_cpu != -1 ? llc_id(pref_cpu) : -1,
+ cpu_of(rq), llc_id(cpu_of(rq)));
+ }
+#endif
WARN_ON_ONCE(task_rq(p) != rq);
activate_task(rq, p, ENQUEUE_NOCLOCK);
wakeup_preempt(rq, p, 0);
--
2.32.0