[RFC PATCH V3 14/17] trace/kscand: Add tracing of scanning and migration

Posted by Raghavendra K T 1 month, 3 weeks ago
Add tracing support to track
 - start and end of scanning.
 - migration.
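
With CONFIG_KSCAND enabled, the new events land in the existing kmem
group in tracefs and can be enabled individually, for example (paths
assume the default tracefs mount point):

  echo 1 > /sys/kernel/tracing/events/kmem/kmem_scan_mm_start/enable
  echo 1 > /sys/kernel/tracing/events/kmem/kmem_scan_mm_migrate/enable
  cat /sys/kernel/tracing/trace_pipe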

CC: Steven Rostedt <rostedt@goodmis.org>
CC: Masami Hiramatsu <mhiramat@kernel.org>
CC: linux-trace-kernel@vger.kernel.org

Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
---
 include/trace/events/kmem.h | 99 +++++++++++++++++++++++++++++++++++++
 mm/kscand.c                 |  9 ++++
 2 files changed, 108 insertions(+)

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f74925a6cf69..d6e544b067b9 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,6 +9,105 @@
 #include <linux/tracepoint.h>
 #include <trace/events/mmflags.h>
 
+#ifdef CONFIG_KSCAND
+DECLARE_EVENT_CLASS(kmem_mm_class,
+
+	TP_PROTO(struct mm_struct *mm),
+
+	TP_ARGS(mm),
+
+	TP_STRUCT__entry(
+		__field(	struct mm_struct *, mm		)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+	),
+
+	TP_printk("mm = %p", __entry->mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_mm_enter,
+	TP_PROTO(struct mm_struct *mm),
+	TP_ARGS(mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_mm_exit,
+	TP_PROTO(struct mm_struct *mm),
+	TP_ARGS(mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_scan_mm_start,
+	TP_PROTO(struct mm_struct *mm),
+	TP_ARGS(mm)
+);
+
+TRACE_EVENT(kmem_scan_mm_end,
+
+	TP_PROTO( struct mm_struct *mm,
+		 unsigned long start,
+		 unsigned long total,
+		 unsigned long scan_period,
+		 unsigned long scan_size,
+		 int target_node),
+
+	TP_ARGS(mm, start, total, scan_period, scan_size, target_node),
+
+	TP_STRUCT__entry(
+		__field(	struct mm_struct *, mm		)
+		__field(	unsigned long,   start		)
+		__field(	unsigned long,   total		)
+		__field(	unsigned long,   scan_period	)
+		__field(	unsigned long,   scan_size	)
+		__field(	int,		 target_node	)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->start = start;
+		__entry->total = total;
+		__entry->scan_period  = scan_period;
+		__entry->scan_size    = scan_size;
+		__entry->target_node  = target_node;
+	),
+
+	TP_printk("mm=%p, start = %ld, total = %ld, scan_period = %ld, scan_size = %ld node = %d",
+		__entry->mm, __entry->start, __entry->total, __entry->scan_period,
+		__entry->scan_size, __entry->target_node)
+);
+
+TRACE_EVENT(kmem_scan_mm_migrate,
+
+	TP_PROTO(struct mm_struct *mm,
+		 int rc,
+		 int target_node,
+		 int msuccess,
+		 int mfailed),
+
+	TP_ARGS(mm, rc, target_node, msuccess, mfailed),
+
+	TP_STRUCT__entry(
+		__field(	struct mm_struct *, mm	)
+		__field(	int,   rc		)
+		__field(	int,   target_node	)
+		__field(	int,   msuccess		)
+		__field(	int,   mfailed		)
+	),
+
+	TP_fast_assign(
+		__entry->mm = mm;
+		__entry->rc = rc;
+		__entry->target_node = target_node;
+		__entry->msuccess = msuccess;
+		__entry->mfailed = mfailed;
+	),
+
+	TP_printk("mm = %p rc = %d node = %d msuccess = %d mfailed = %d ",
+		__entry->mm, __entry->rc, __entry->target_node,
+		__entry->msuccess, __entry->mfailed)
+);
+#endif
+
 TRACE_EVENT(kmem_cache_alloc,
 
 	TP_PROTO(unsigned long call_site,
diff --git a/mm/kscand.c b/mm/kscand.c
index e14645565ba7..273306f47553 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -1105,6 +1105,7 @@ static void kmigrated_migrate_mm(struct kmigrated_mm_slot *mm_slot)
 				count_kscand_migrate_failed();
 				mfailed++;
 			}
+			trace_kmem_scan_mm_migrate(mm, ret, dest, msuccess, mfailed);
 
 			kfree(info);
 
@@ -1308,6 +1309,9 @@ static unsigned long kscand_scan_mm_slot(void)
 	mm_target_node = READ_ONCE(mm->target_node);
 	if (mm_target_node != mm_slot_target_node)
 		WRITE_ONCE(mm->target_node, mm_slot_target_node);
+
+	trace_kmem_scan_mm_start(mm);
+
 	now = jiffies;
 
 	if (mm_slot_next_scan && time_before(now, mm_slot_next_scan))
@@ -1378,6 +1382,9 @@ static unsigned long kscand_scan_mm_slot(void)
 		kscand_update_mmslot_info(mm_slot, total, target_node);
 	}
 
+	trace_kmem_scan_mm_end(mm, address, total, mm_slot_scan_period,
+			mm_slot_scan_size, target_node);
+
 outerloop:
 	/* exit_mmap will destroy ptes after this */
 	mmap_read_unlock(mm);
@@ -1530,6 +1537,7 @@ void __kscand_enter(struct mm_struct *mm)
 	spin_unlock(&kscand_mm_lock);
 
 	mmgrab(mm);
+	trace_kmem_mm_enter(mm);
 	if (wakeup)
 		wake_up_interruptible(&kscand_wait);
 }
@@ -1540,6 +1548,7 @@ void __kscand_exit(struct mm_struct *mm)
 	struct mm_slot *slot;
 	int free = 0, serialize = 1;
 
+	trace_kmem_mm_exit(mm);
 	spin_lock(&kscand_mm_lock);
 	slot = mm_slot_lookup(kscand_slots_hash, mm);
 	mm_slot = mm_slot_entry(slot, struct kscand_mm_slot, slot);
-- 
2.34.1
Re: [RFC PATCH V3 14/17] trace/kscand: Add tracing of scanning and migration
Posted by Jonathan Cameron 1 day, 5 hours ago
On Thu, 14 Aug 2025 15:33:04 +0000
Raghavendra K T <raghavendra.kt@amd.com> wrote:

> Add tracing support to track
>  - start and end of scanning.
>  - migration.
> 
> CC: Steven Rostedt <rostedt@goodmis.org>
> CC: Masami Hiramatsu <mhiramat@kernel.org>
> CC: linux-trace-kernel@vger.kernel.org
> 
CCs are part of the tags block, so no blank line.

Probably move them under the --- as I doubt we need to keep these
in the git log long term.
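
i.e. something like

	Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
	---
	CC: Steven Rostedt <rostedt@goodmis.org>
	CC: Masami Hiramatsu <mhiramat@kernel.org>
	CC: linux-trace-kernel@vger.kernel.org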

> Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
> ---
>  include/trace/events/kmem.h | 99 +++++++++++++++++++++++++++++++++++++
>  mm/kscand.c                 |  9 ++++
>  2 files changed, 108 insertions(+)
> 
> diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
> index f74925a6cf69..d6e544b067b9 100644
> --- a/include/trace/events/kmem.h
> +++ b/include/trace/events/kmem.h
> @@ -9,6 +9,105 @@
>  #include <linux/tracepoint.h>
>  #include <trace/events/mmflags.h>
>  
> +#ifdef CONFIG_KSCAND
> +DECLARE_EVENT_CLASS(kmem_mm_class,
> +
> +	TP_PROTO(struct mm_struct *mm),
> +
> +	TP_ARGS(mm),
> +
> +	TP_STRUCT__entry(
> +		__field(	struct mm_struct *, mm		)

Trace header formatting is sometimes interesting. But I have no
idea why you have this padded like that.
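
e.g. plain

	__field(struct mm_struct *, mm)

or at least consistent tabs throughout would be easier on the eye.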

> +	),
> +
> +	TP_fast_assign(
> +		__entry->mm = mm;
> +	),
> +
> +	TP_printk("mm = %p", __entry->mm)
> +);
> +
> +DEFINE_EVENT(kmem_mm_class, kmem_mm_enter,
> +	TP_PROTO(struct mm_struct *mm),
> +	TP_ARGS(mm)
> +);
> +
> +DEFINE_EVENT(kmem_mm_class, kmem_mm_exit,
> +	TP_PROTO(struct mm_struct *mm),
> +	TP_ARGS(mm)
> +);
> +
> +DEFINE_EVENT(kmem_mm_class, kmem_scan_mm_start,
> +	TP_PROTO(struct mm_struct *mm),
> +	TP_ARGS(mm)
> +);
> +
> +TRACE_EVENT(kmem_scan_mm_end,
> +
> +	TP_PROTO( struct mm_struct *mm,
> +		 unsigned long start,
> +		 unsigned long total,
> +		 unsigned long scan_period,
> +		 unsigned long scan_size,
> +		 int target_node),
> +
> +	TP_ARGS(mm, start, total, scan_period, scan_size, target_node),
> +
> +	TP_STRUCT__entry(
> +		__field(	struct mm_struct *, mm		)
> +		__field(	unsigned long,   start		)
> +		__field(	unsigned long,   total		)
> +		__field(	unsigned long,   scan_period	)
> +		__field(	unsigned long,   scan_size	)
> +		__field(	int,		 target_node	)

Similar. Aligning stuff might make sense, but why the spacing before the type?