Add tracing support to track
- start and end of scanning.
- migration.
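
For illustration (not part of this patch), a probe can be attached to the
new kmem_scan_mm_end event from built-in code; the probe signature is the
TP_PROTO arguments preceded by a void *data pointer. A minimal sketch,
assuming the header change below is in place:

  /* Sketch only, not part of this patch: hook kmem_scan_mm_end. */
  #include <linux/init.h>
  #include <linux/printk.h>
  #include <trace/events/kmem.h>

  static void probe_scan_mm_end(void *data, struct mm_struct *mm,
  				unsigned long start, unsigned long total,
  				unsigned long scan_period,
  				unsigned long scan_size, int target_node)
  {
  	pr_info("kscand: mm=%p total=%lu node=%d\n", mm, total, target_node);
  }

  static int __init kscand_trace_probe_init(void)
  {
  	/* register_trace_<event>() is generated by TRACE_EVENT() */
  	return register_trace_kmem_scan_mm_end(probe_scan_mm_end, NULL);
  }
  late_initcall(kscand_trace_probe_init);
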
CC: Steven Rostedt <rostedt@goodmis.org>
CC: Masami Hiramatsu <mhiramat@kernel.org>
CC: linux-trace-kernel@vger.kernel.org
Signed-off-by: Raghavendra K T <raghavendra.kt@amd.com>
---
include/trace/events/kmem.h | 90 +++++++++++++++++++++++++++++++++++++
mm/kscand.c | 8 ++++
2 files changed, 98 insertions(+)
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f74925a6cf69..682c4015414f 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -9,6 +9,96 @@
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
+DECLARE_EVENT_CLASS(kmem_mm_class,
+
+ TP_PROTO(struct mm_struct *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field( struct mm_struct *, mm )
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ ),
+
+ TP_printk("mm = %p", __entry->mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_mm_enter,
+ TP_PROTO(struct mm_struct *mm),
+ TP_ARGS(mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_mm_exit,
+ TP_PROTO(struct mm_struct *mm),
+ TP_ARGS(mm)
+);
+
+DEFINE_EVENT(kmem_mm_class, kmem_scan_mm_start,
+ TP_PROTO(struct mm_struct *mm),
+ TP_ARGS(mm)
+);
+
+TRACE_EVENT(kmem_scan_mm_end,
+
+ TP_PROTO(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long total,
+ unsigned long scan_period,
+ unsigned long scan_size,
+ int target_node),
+
+ TP_ARGS(mm, start, total, scan_period, scan_size, target_node),
+
+ TP_STRUCT__entry(
+ __field( struct mm_struct *, mm )
+ __field( unsigned long, start )
+ __field( unsigned long, total )
+ __field( unsigned long, scan_period )
+ __field( unsigned long, scan_size )
+ __field( int, target_node )
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->start = start;
+ __entry->total = total;
+ __entry->scan_period = scan_period;
+ __entry->scan_size = scan_size;
+ __entry->target_node = target_node;
+ ),
+
+ TP_printk("mm=%p, start = %ld, total = %ld, scan_period = %ld, scan_size = %ld node = %d",
+ __entry->mm, __entry->start, __entry->total, __entry->scan_period,
+ __entry->scan_size, __entry->target_node)
+);
+
+TRACE_EVENT(kmem_scan_mm_migrate,
+
+ TP_PROTO(struct mm_struct *mm,
+ int rc,
+ int target_node),
+
+ TP_ARGS(mm, rc, target_node),
+
+ TP_STRUCT__entry(
+ __field( struct mm_struct *, mm )
+ __field( int, rc )
+ __field( int, target_node )
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->rc = rc;
+ __entry->target_node = target_node;
+ ),
+
+ TP_printk("mm = %p rc = %d node = %d",
+ __entry->mm, __entry->rc, __entry->target_node)
+);
+
TRACE_EVENT(kmem_cache_alloc,
TP_PROTO(unsigned long call_site,
diff --git a/mm/kscand.c b/mm/kscand.c
index db7b2f940f36..029d6d2bedc3 100644
--- a/mm/kscand.c
+++ b/mm/kscand.c
@@ -1035,6 +1035,7 @@ static void kmigrated_migrate_mm(struct kmigrated_mm_slot *mm_slot)
}
ret = kmigrated_promote_folio(info, mm, dest);
+ trace_kmem_scan_mm_migrate(mm, ret, dest);
/* TBD: encode migrated count here, currently assume folio_nr_pages */
if (!ret)
@@ -1230,6 +1231,9 @@ static unsigned long kscand_scan_mm_slot(void)
mm_target_node = READ_ONCE(mm->target_node);
if (mm_target_node != mm_slot_target_node)
WRITE_ONCE(mm->target_node, mm_slot_target_node);
+
+ trace_kmem_scan_mm_start(mm);
+
now = jiffies;
if (mm_slot_next_scan && time_before(now, mm_slot_next_scan))
@@ -1300,6 +1304,8 @@ static unsigned long kscand_scan_mm_slot(void)
kscand_update_mmslot_info(mm_slot, total, target_node);
}
+ trace_kmem_scan_mm_end(mm, address, total, mm_slot_scan_period,
+ mm_slot_scan_size, target_node);
outerloop:
/* exit_mmap will destroy ptes after this */
@@ -1453,6 +1459,7 @@ void __kscand_enter(struct mm_struct *mm)
spin_unlock(&kscand_mm_lock);
mmgrab(mm);
+ trace_kmem_mm_enter(mm);
if (wakeup)
wake_up_interruptible(&kscand_wait);
}
@@ -1463,6 +1470,7 @@ void __kscand_exit(struct mm_struct *mm)
struct mm_slot *slot;
int free = 0, serialize = 1;
+ trace_kmem_mm_exit(mm);
spin_lock(&kscand_mm_lock);
slot = mm_slot_lookup(kscand_slots_hash, mm);
mm_slot = mm_slot_entry(slot, struct kscand_mm_slot, slot);
--
2.34.1
On Tue, 24 Jun 2025 05:56:16 +0000
Raghavendra K T <raghavendra.kt@amd.com> wrote:
> Add tracing support to track
> - start and end of scanning.
> - migration.
>
[...]
> --- a/include/trace/events/kmem.h
> +++ b/include/trace/events/kmem.h
> @@ -9,6 +9,96 @@
> #include <linux/tracepoint.h>
> #include <trace/events/mmflags.h>
>

Please make sure the event is not exposed when it is not used.

#ifdef CONFIG_KSCAND
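
For instance (untested sketch; only the guard matters here, the event
bodies stay as in your patch):

  #ifdef CONFIG_KSCAND
  DECLARE_EVENT_CLASS(kmem_mm_class,

  	TP_PROTO(struct mm_struct *mm),

  	TP_ARGS(mm),

  	TP_STRUCT__entry(
  		__field(struct mm_struct *, mm)
  	),

  	TP_fast_assign(
  		__entry->mm = mm;
  	),

  	TP_printk("mm=%p", __entry->mm)
  );

  /* ... the other kscand-only events, unchanged ... */

  #endif /* CONFIG_KSCAND */
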
Thank you,
[...]
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
On 6/24/2025 12:39 PM, Masami Hiramatsu (Google) wrote:
> On Tue, 24 Jun 2025 05:56:16 +0000
> Raghavendra K T <raghavendra.kt@amd.com> wrote:
>
>> Add tracing support to track
>> - start and end of scanning.
>> - migration.
[...]
> Please make sure the event is not exposed when it is not used.
>
> #ifdef CONFIG_KSCAND
>
> Thank you,
> [...]

Sure. Noted. Thank you :)