[PATCH v4 5/5] mm: add tracepoints for zone lock

Dmitry Ilvokhin posted 5 patches 1 month ago
[PATCH v4 5/5] mm: add tracepoints for zone lock
Posted by Dmitry Ilvokhin 1 month ago
Add tracepoint instrumentation to zone lock acquire/release operations
via the wrapper helpers introduced earlier in this series.

The implementation follows the mmap_lock tracepoint pattern: a
lightweight inline helper checks whether the tracepoint is enabled and
calls into an out-of-line helper when tracing is active. When
CONFIG_TRACING is disabled, the helpers compile to empty inline stubs.

The fast path is unaffected when tracing is disabled.

Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
---
 MAINTAINERS                      |  1 +
 include/linux/mmzone_lock.h      | 64 +++++++++++++++++++++++++++++++-
 include/trace/events/zone_lock.h | 64 ++++++++++++++++++++++++++++++++
 mm/Makefile                      |  2 +-
 mm/zone_lock.c                   | 28 ++++++++++++++
 5 files changed, 157 insertions(+), 2 deletions(-)
 create mode 100644 include/trace/events/zone_lock.h
 create mode 100644 mm/zone_lock.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 947298ecb111..de39e87a4c46 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16681,6 +16681,7 @@ F:	include/linux/pgtable.h
 F:	include/linux/ptdump.h
 F:	include/linux/vmpressure.h
 F:	include/linux/vmstat.h
+F:	include/trace/events/zone_lock.h
 F:	kernel/fork.c
 F:	mm/Kconfig
 F:	mm/debug.c
diff --git a/include/linux/mmzone_lock.h b/include/linux/mmzone_lock.h
index 62e34d500078..6bd8b026029f 100644
--- a/include/linux/mmzone_lock.h
+++ b/include/linux/mmzone_lock.h
@@ -4,6 +4,53 @@
 
 #include <linux/mmzone.h>
 #include <linux/spinlock.h>
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(zone_lock_start_locking);
+DECLARE_TRACEPOINT(zone_lock_acquire_returned);
+DECLARE_TRACEPOINT(zone_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __zone_lock_do_trace_start_locking(struct zone *zone);
+void __zone_lock_do_trace_acquire_returned(struct zone *zone, bool success);
+void __zone_lock_do_trace_released(struct zone *zone);
+
+static inline void __zone_lock_trace_start_locking(struct zone *zone)
+{
+	if (tracepoint_enabled(zone_lock_start_locking))
+		__zone_lock_do_trace_start_locking(zone);
+}
+
+static inline void __zone_lock_trace_acquire_returned(struct zone *zone,
+						      bool success)
+{
+	if (tracepoint_enabled(zone_lock_acquire_returned))
+		__zone_lock_do_trace_acquire_returned(zone, success);
+}
+
+static inline void __zone_lock_trace_released(struct zone *zone)
+{
+	if (tracepoint_enabled(zone_lock_released))
+		__zone_lock_do_trace_released(zone);
+}
+
+#else /* !CONFIG_TRACING */
+
+static inline void __zone_lock_trace_start_locking(struct zone *zone)
+{
+}
+
+static inline void __zone_lock_trace_acquire_returned(struct zone *zone,
+						      bool success)
+{
+}
+
+static inline void __zone_lock_trace_released(struct zone *zone)
+{
+}
+
+#endif /* CONFIG_TRACING */
 
 static inline void zone_lock_init(struct zone *zone)
 {
@@ -12,26 +59,41 @@ static inline void zone_lock_init(struct zone *zone)
 
 #define zone_lock_irqsave(zone, flags)				\
 do {								\
+	bool success = true;					\
+								\
+	__zone_lock_trace_start_locking(zone);			\
 	spin_lock_irqsave(&(zone)->_lock, flags);		\
+	__zone_lock_trace_acquire_returned(zone, success);	\
 } while (0)
 
 #define zone_trylock_irqsave(zone, flags)			\
 ({								\
-	spin_trylock_irqsave(&(zone)->_lock, flags);		\
+	bool success;						\
+								\
+	__zone_lock_trace_start_locking(zone);			\
+	success = spin_trylock_irqsave(&(zone)->_lock, flags);	\
+	__zone_lock_trace_acquire_returned(zone, success);	\
+	success;						\
 })
 
 static inline void zone_unlock_irqrestore(struct zone *zone, unsigned long flags)
 {
+	__zone_lock_trace_released(zone);
 	spin_unlock_irqrestore(&zone->_lock, flags);
 }
 
 static inline void zone_lock_irq(struct zone *zone)
 {
+	bool success = true;
+
+	__zone_lock_trace_start_locking(zone);
 	spin_lock_irq(&zone->_lock);
+	__zone_lock_trace_acquire_returned(zone, success);
 }
 
 static inline void zone_unlock_irq(struct zone *zone)
 {
+	__zone_lock_trace_released(zone);
 	spin_unlock_irq(&zone->_lock);
 }
 
diff --git a/include/trace/events/zone_lock.h b/include/trace/events/zone_lock.h
new file mode 100644
index 000000000000..3df82a8c0160
--- /dev/null
+++ b/include/trace/events/zone_lock.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM zone_lock
+
+#if !defined(_TRACE_ZONE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ZONE_LOCK_H
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+struct zone;
+
+DECLARE_EVENT_CLASS(zone_lock,
+
+	TP_PROTO(struct zone *zone),
+
+	TP_ARGS(zone),
+
+	TP_STRUCT__entry(
+		__field(struct zone *, zone)
+	),
+
+	TP_fast_assign(
+		__entry->zone = zone;
+	),
+
+	TP_printk("zone=%p", __entry->zone)
+);
+
+#define DEFINE_ZONE_LOCK_EVENT(name)			\
+	DEFINE_EVENT(zone_lock, name,			\
+		TP_PROTO(struct zone *zone),		\
+		TP_ARGS(zone))
+
+DEFINE_ZONE_LOCK_EVENT(zone_lock_start_locking);
+DEFINE_ZONE_LOCK_EVENT(zone_lock_released);
+
+TRACE_EVENT(zone_lock_acquire_returned,
+
+	TP_PROTO(struct zone *zone, bool success),
+
+	TP_ARGS(zone, success),
+
+	TP_STRUCT__entry(
+		__field(struct zone *, zone)
+		__field(bool, success)
+	),
+
+	TP_fast_assign(
+		__entry->zone = zone;
+		__entry->success = success;
+	),
+
+	TP_printk(
+		"zone=%p success=%s",
+		__entry->zone,
+		__entry->success ? "true" : "false"
+	)
+);
+
+#endif /* _TRACE_ZONE_LOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/mm/Makefile b/mm/Makefile
index 8ad2ab08244e..ffd06cf7a04e 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -55,7 +55,7 @@ obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o percpu.o slab_common.o \
 			   compaction.o show_mem.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o gup.o mmap_lock.o vma_init.o $(mmu-y)
+			   debug.o gup.o mmap_lock.o zone_lock.o vma_init.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
diff --git a/mm/zone_lock.c b/mm/zone_lock.c
new file mode 100644
index 000000000000..f4e32220af9a
--- /dev/null
+++ b/mm/zone_lock.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+#define CREATE_TRACE_POINTS
+#include <trace/events/zone_lock.h>
+
+#include <linux/mmzone_lock.h>
+
+EXPORT_TRACEPOINT_SYMBOL(zone_lock_start_locking);
+EXPORT_TRACEPOINT_SYMBOL(zone_lock_acquire_returned);
+EXPORT_TRACEPOINT_SYMBOL(zone_lock_released);
+
+#ifdef CONFIG_TRACING
+
+void __zone_lock_do_trace_start_locking(struct zone *zone)
+{
+	trace_zone_lock_start_locking(zone);
+}
+
+void __zone_lock_do_trace_acquire_returned(struct zone *zone, bool success)
+{
+	trace_zone_lock_acquire_returned(zone, success);
+}
+
+void __zone_lock_do_trace_released(struct zone *zone)
+{
+	trace_zone_lock_released(zone);
+}
+
+#endif /* CONFIG_TRACING */
-- 
2.47.3
Re: [PATCH v4 5/5] mm: add tracepoints for zone lock
Posted by Vlastimil Babka (SUSE) 1 month ago
On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> Add tracepoint instrumentation to zone lock acquire/release operations
> via the previously introduced wrappers.
> 
> The implementation follows the mmap_lock tracepoint pattern: a
> lightweight inline helper checks whether the tracepoint is enabled and
> calls into an out-of-line helper when tracing is active. When
> CONFIG_TRACING is disabled, helpers compile to empty inline stubs.
> 
> The fast path is unaffected when tracing is disabled.
> 
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> Acked-by: Shakeel Butt <shakeel.butt@linux.dev>

Agree with Steven; otherwise

Reviewed-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Re: [PATCH v4 5/5] mm: add tracepoints for zone lock
Posted by Steven Rostedt 1 month ago
On Fri, 27 Feb 2026 16:00:27 +0000
Dmitry Ilvokhin <d@ilvokhin.com> wrote:

>  static inline void zone_lock_init(struct zone *zone)
>  {
> @@ -12,26 +59,41 @@ static inline void zone_lock_init(struct zone *zone)
>  
>  #define zone_lock_irqsave(zone, flags)				\
>  do {								\
> +	bool success = true;					\
> +								\
> +	__zone_lock_trace_start_locking(zone);			\
>  	spin_lock_irqsave(&(zone)->_lock, flags);		\
> +	__zone_lock_trace_acquire_returned(zone, success);	\

Why the "success" variable and not just:

	__zone_lock_trace_acquire_returned(zone, true);

 ?


>  } while (0)
>  
>  #define zone_trylock_irqsave(zone, flags)			\
>  ({								\
> -	spin_trylock_irqsave(&(zone)->_lock, flags);		\
> +	bool success;						\
> +								\
> +	__zone_lock_trace_start_locking(zone);			\
> +	success = spin_trylock_irqsave(&(zone)->_lock, flags);	\
> +	__zone_lock_trace_acquire_returned(zone, success);	\
> +	success;						\
>  })
>  
>  static inline void zone_unlock_irqrestore(struct zone *zone, unsigned long flags)
>  {
> +	__zone_lock_trace_released(zone);
>  	spin_unlock_irqrestore(&zone->_lock, flags);
>  }
>  
>  static inline void zone_lock_irq(struct zone *zone)
>  {
> +	bool success = true;
> +
> +	__zone_lock_trace_start_locking(zone);
>  	spin_lock_irq(&zone->_lock);
> +	__zone_lock_trace_acquire_returned(zone, success);

Same here.

>  }
>  
>  static inline void zone_unlock_irq(struct zone *zone)
>  {
> +	__zone_lock_trace_released(zone);
>  	spin_unlock_irq(&zone->_lock);
>  }
>

-- Steve
Re: [PATCH v4 5/5] mm: add tracepoints for zone lock
Posted by Dmitry Ilvokhin 1 month ago
On Fri, Feb 27, 2026 at 02:46:49PM -0500, Steven Rostedt wrote:
> On Fri, 27 Feb 2026 16:00:27 +0000
> Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> 
> >  static inline void zone_lock_init(struct zone *zone)
> >  {
> > @@ -12,26 +59,41 @@ static inline void zone_lock_init(struct zone *zone)
> >  
> >  #define zone_lock_irqsave(zone, flags)				\
> >  do {								\
> > +	bool success = true;					\
> > +								\
> > +	__zone_lock_trace_start_locking(zone);			\
> >  	spin_lock_irqsave(&(zone)->_lock, flags);		\
> > +	__zone_lock_trace_acquire_returned(zone, success);	\
> 
> Why the "success" variable and not just:
> 
> 	__zone_lock_trace_acquire_returned(zone, true);
> 
>  ?

Good point, passing true directly is cleaner. Happy to respin if needed.