[PATCH v4 4/5] mm: rename zone->lock to zone->_lock

Dmitry Ilvokhin posted 5 patches 1 month ago
[PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Dmitry Ilvokhin 1 month ago
This intentionally breaks direct users of zone->lock at compile time so
all call sites are converted to the zone lock wrappers. Without the
rename, present and future out-of-tree code could continue using
spin_lock(&zone->lock) and bypass the wrappers and tracing
infrastructure.

No functional change intended.

Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: SeongJae Park <sj@kernel.org>
---
 include/linux/mmzone.h      |  7 +++++--
 include/linux/mmzone_lock.h | 12 ++++++------
 mm/compaction.c             |  4 ++--
 mm/internal.h               |  2 +-
 mm/page_alloc.c             | 16 ++++++++--------
 mm/page_isolation.c         |  4 ++--
 mm/page_owner.c             |  2 +-
 7 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3e51190a55e4..32bca655fce5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1009,8 +1009,11 @@ struct zone {
 	/* zone flags, see below */
 	unsigned long		flags;
 
-	/* Primarily protects free_area */
-	spinlock_t		lock;
+	/*
+	 * Primarily protects free_area. Should be accessed via zone_lock_*
+	 * helpers.
+	 */
+	spinlock_t		_lock;
 
 	/* Pages to be freed when next trylock succeeds */
 	struct llist_head	trylock_free_pages;
diff --git a/include/linux/mmzone_lock.h b/include/linux/mmzone_lock.h
index a1cfba8408d6..62e34d500078 100644
--- a/include/linux/mmzone_lock.h
+++ b/include/linux/mmzone_lock.h
@@ -7,32 +7,32 @@
 
 static inline void zone_lock_init(struct zone *zone)
 {
-	spin_lock_init(&zone->lock);
+	spin_lock_init(&zone->_lock);
 }
 
 #define zone_lock_irqsave(zone, flags)				\
 do {								\
-	spin_lock_irqsave(&(zone)->lock, flags);		\
+	spin_lock_irqsave(&(zone)->_lock, flags);		\
 } while (0)
 
 #define zone_trylock_irqsave(zone, flags)			\
 ({								\
-	spin_trylock_irqsave(&(zone)->lock, flags);		\
+	spin_trylock_irqsave(&(zone)->_lock, flags);		\
 })
 
 static inline void zone_unlock_irqrestore(struct zone *zone, unsigned long flags)
 {
-	spin_unlock_irqrestore(&zone->lock, flags);
+	spin_unlock_irqrestore(&zone->_lock, flags);
 }
 
 static inline void zone_lock_irq(struct zone *zone)
 {
-	spin_lock_irq(&zone->lock);
+	spin_lock_irq(&zone->_lock);
 }
 
 static inline void zone_unlock_irq(struct zone *zone)
 {
-	spin_unlock_irq(&zone->lock);
+	spin_unlock_irq(&zone->_lock);
 }
 
 #endif /* _LINUX_MMZONE_LOCK_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index c68fcc416fc7..ac2a259518b1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -506,7 +506,7 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
 static bool compact_zone_lock_irqsave(struct zone *zone,
 				      unsigned long *flags,
 				      struct compact_control *cc)
-	__acquires(&zone->lock)
+	__acquires(&zone->_lock)
 {
 	/* Track if the lock is contended in async mode */
 	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
@@ -1402,7 +1402,7 @@ static bool suitable_migration_target(struct compact_control *cc,
 		int order = cc->order > 0 ? cc->order : pageblock_order;
 
 		/*
-		 * We are checking page_order without zone->lock taken. But
+		 * We are checking page_order without zone->_lock taken. But
 		 * the only small danger is that we skip a potentially suitable
 		 * pageblock, so it's not worth to check order for valid range.
 		 */
diff --git a/mm/internal.h b/mm/internal.h
index cb0af847d7d9..6cb06e21ce15 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -710,7 +710,7 @@ static inline unsigned int buddy_order(struct page *page)
  * (d) a page and its buddy are in the same zone.
  *
  * For recording whether a page is in the buddy system, we set PageBuddy.
- * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
+ * Setting, clearing, and testing PageBuddy is serialized by zone->_lock.
  *
  * For recording page's order, we use page_private(page).
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bcc3fe0368fc..0d078aef8ed6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -815,7 +815,7 @@ compaction_capture(struct capture_control *capc, struct page *page,
 static inline void account_freepages(struct zone *zone, int nr_pages,
 				     int migratetype)
 {
-	lockdep_assert_held(&zone->lock);
+	lockdep_assert_held(&zone->_lock);
 
 	if (is_migrate_isolate(migratetype))
 		return;
@@ -2473,7 +2473,7 @@ enum rmqueue_mode {
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
- * Call me with the zone->lock already held.
+ * Call me with the zone->_lock already held.
  */
 static __always_inline struct page *
 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
@@ -2501,7 +2501,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 	 * fallbacks modes with increasing levels of fragmentation risk.
 	 *
 	 * The fallback logic is expensive and rmqueue_bulk() calls in
-	 * a loop with the zone->lock held, meaning the freelists are
+	 * a loop with the zone->_lock held, meaning the freelists are
 	 * not subject to any outside changes. Remember in *mode where
 	 * we found pay dirt, to save us the search on the next call.
 	 */
@@ -3203,7 +3203,7 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
 	struct zone *zone = page_zone(page);
 
 	/* zone lock should be held when this function is called */
-	lockdep_assert_held(&zone->lock);
+	lockdep_assert_held(&zone->_lock);
 
 	/* Return isolated page to tail of freelist. */
 	__free_one_page(page, page_to_pfn(page), zone, order, mt,
@@ -7086,7 +7086,7 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
 	 * pages.  Because of this, we reserve the bigger range and
 	 * once this is done free the pages we are not interested in.
 	 *
-	 * We don't have to hold zone->lock here because the pages are
+	 * We don't have to hold zone->_lock here because the pages are
 	 * isolated thus they won't get removed from buddy.
 	 */
 	outer_start = find_large_buddy(start);
@@ -7655,7 +7655,7 @@ void accept_page(struct page *page)
 		return;
 	}
 
-	/* Unlocks zone->lock */
+	/* Unlocks zone->_lock */
 	__accept_page(zone, &flags, page);
 }
 
@@ -7672,7 +7672,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 		return false;
 	}
 
-	/* Unlocks zone->lock */
+	/* Unlocks zone->_lock */
 	__accept_page(zone, &flags, page);
 
 	return true;
@@ -7813,7 +7813,7 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned
 
 	/*
 	 * Best effort allocation from percpu free list.
-	 * If it's empty attempt to spin_trylock zone->lock.
+	 * If it's empty attempt to spin_trylock zone->_lock.
 	 */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 91a0836bf1b7..cf731370e7a7 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -212,7 +212,7 @@ static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
 	zone_unlock_irqrestore(zone, flags);
 	if (mode == PB_ISOLATE_MODE_MEM_OFFLINE) {
 		/*
-		 * printk() with zone->lock held will likely trigger a
+		 * printk() with zone->_lock held will likely trigger a
 		 * lockdep splat, so defer it here.
 		 */
 		dump_page(unmovable, "unmovable page");
@@ -553,7 +553,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * Test all pages in the range is free(means isolated) or not.
  * all pages in [start_pfn...end_pfn) must be in the same zone.
- * zone->lock must be held before call this.
+ * zone->_lock must be held before call this.
  *
  * Returns the last tested pfn.
  */
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8178e0be557f..54a4ba63b14f 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -799,7 +799,7 @@ static void init_pages_in_zone(struct zone *zone)
 				continue;
 
 			/*
-			 * To avoid having to grab zone->lock, be a little
+			 * To avoid having to grab zone->_lock, be a little
 			 * careful when reading buddy page order. The only
 			 * danger is that we skip too much and potentially miss
 			 * some early allocated pages, which is better than
-- 
2.47.3
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Pedro Falcato 3 weeks, 6 days ago
On Fri, Feb 27, 2026 at 04:00:26PM +0000, Dmitry Ilvokhin wrote:
> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
> 
> No functional change intended.
> 
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> Acked-by: SeongJae Park <sj@kernel.org>
> ---
>  include/linux/mmzone.h      |  7 +++++--
>  include/linux/mmzone_lock.h | 12 ++++++------
>  mm/compaction.c             |  4 ++--
>  mm/internal.h               |  2 +-
>  mm/page_alloc.c             | 16 ++++++++--------
>  mm/page_isolation.c         |  4 ++--
>  mm/page_owner.c             |  2 +-
>  7 files changed, 25 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
> index 3e51190a55e4..32bca655fce5 100644
> --- a/include/linux/mmzone.h
> +++ b/include/linux/mmzone.h
> @@ -1009,8 +1009,11 @@ struct zone {
>  	/* zone flags, see below */
>  	unsigned long		flags;
>  
> -	/* Primarily protects free_area */
> -	spinlock_t		lock;
> +	/*
> +	 * Primarily protects free_area. Should be accessed via zone_lock_*
> +	 * helpers.
> +	 */
> +	spinlock_t		_lock;

I really don't like this uglification.
Suggestion:
	spinlock_t __private	lock;

>  
>  	/* Pages to be freed when next trylock succeeds */
>  	struct llist_head	trylock_free_pages;
> diff --git a/include/linux/mmzone_lock.h b/include/linux/mmzone_lock.h
> index a1cfba8408d6..62e34d500078 100644
> --- a/include/linux/mmzone_lock.h
> +++ b/include/linux/mmzone_lock.h
> @@ -7,32 +7,32 @@
>  
>  static inline void zone_lock_init(struct zone *zone)
>  {
> -	spin_lock_init(&zone->lock);

and then ACCESS_PRIVATE() all over these helpers. This will not make a
difference to the compiler, but it will work with sparse.

It's not that I don't understand what you're doing, but we're going to need
to look to this code and refer to this code 20 years from now, I would rather
not refer to zone->_lock :)

-- 
Pedro
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Vlastimil Babka (SUSE) 1 month ago
On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
> 
> No functional change intended.
> 
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> Acked-by: SeongJae Park <sj@kernel.org>

I see some more instances of 'zone->lock' in comments in
include/linux/mmzone.h and under Documentation/ but otherwise LGTM.

Reviewed-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Andrew Morton 1 month ago
On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:

> On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > This intentionally breaks direct users of zone->lock at compile time so
> > all call sites are converted to the zone lock wrappers. Without the
> > rename, present and future out-of-tree code could continue using
> > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > infrastructure.
> > 
> > No functional change intended.
> > 
> > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > Acked-by: SeongJae Park <sj@kernel.org>
> 
> I see some more instances of 'zone->lock' in comments in
> include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> 

I fixed (most of) that in the previous version but my fix was lost.


 include/linux/mmzone.h |   10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

--- a/include/linux/mmzone.h~mm-rename-zone-lock-to-zone-_lock-fix
+++ a/include/linux/mmzone.h
@@ -1037,12 +1037,12 @@ struct zone {
 	 * Locking rules:
 	 *
 	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
-	 * It is a seqlock because it has to be read outside of zone->lock,
+	 * It is a seqlock because it has to be read outside of zone_lock,
 	 * and it is done in the main allocator path.  But, it is written
 	 * quite infrequently.
 	 *
-	 * The span_seq lock is declared along with zone->lock because it is
-	 * frequently read in proximity to zone->lock.  It's good to
+	 * The span_seq lock is declared along with zone_lock because it is
+	 * frequently read in proximity to zone_lock.  It's good to
 	 * give them a chance of being in the same cacheline.
 	 *
 	 * Write access to present_pages at runtime should be protected by
@@ -1065,7 +1065,7 @@ struct zone {
 	/*
 	 * Number of isolated pageblock. It is used to solve incorrect
 	 * freepage counting problem due to racy retrieving migratetype
-	 * of pageblock. Protected by zone->lock.
+	 * of pageblock. Protected by zone_lock.
 	 */
 	unsigned long		nr_isolate_pageblock;
 #endif
@@ -1502,7 +1502,7 @@ typedef struct pglist_data {
 	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
 	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
 	 *
-	 * Nests above zone->lock and zone->span_seqlock
+	 * Nests above zone_lock and zone->span_seqlock
 	 */
 	spinlock_t node_size_lock;
 #endif
_
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Dmitry Ilvokhin 1 month ago
On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> 
> > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > > This intentionally breaks direct users of zone->lock at compile time so
> > > all call sites are converted to the zone lock wrappers. Without the
> > > rename, present and future out-of-tree code could continue using
> > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > > infrastructure.
> > > 
> > > No functional change intended.
> > > 
> > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > > Acked-by: SeongJae Park <sj@kernel.org>
> > 
> > I see some more instances of 'zone->lock' in comments in
> > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > 
> 
> I fixed (most of) that in the previous version but my fix was lost.

Thanks for the fixups, Andrew.

I still see a few 'zone->lock' references in Documentation remain on
mm-new. This patch cleans them up, as noted by Vlastimil.

I'm happy to adjust this patch if anything else needs attention.

From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
From: Dmitry Ilvokhin <d@ilvokhin.com>
Date: Tue, 3 Mar 2026 06:13:13 -0800
Subject: [PATCH] mm: fix remaining zone->lock references

Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
---
 Documentation/mm/physical_memory.rst | 4 ++--
 Documentation/trace/events-kmem.rst  | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
index b76183545e5b..e344f93515b6 100644
--- a/Documentation/mm/physical_memory.rst
+++ b/Documentation/mm/physical_memory.rst
@@ -500,11 +500,11 @@ General
 ``nr_isolate_pageblock``
   Number of isolated pageblocks. It is used to solve incorrect freepage counting
   problem due to racy retrieving migratetype of pageblock. Protected by
-  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
+  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
 
 ``span_seqlock``
   The seqlock to protect ``zone_start_pfn`` and ``spanned_pages``. It is a
-  seqlock because it has to be read outside of ``zone->lock``, and it is done in
+  seqlock because it has to be read outside of ``zone_lock``, and it is done in
   the main allocator path. However, the seqlock is written quite infrequently.
   Defined only when ``CONFIG_MEMORY_HOTPLUG`` is enabled.
 
diff --git a/Documentation/trace/events-kmem.rst b/Documentation/trace/events-kmem.rst
index 68fa75247488..3c20a972de27 100644
--- a/Documentation/trace/events-kmem.rst
+++ b/Documentation/trace/events-kmem.rst
@@ -57,7 +57,7 @@ the per-CPU allocator (high performance) or the buddy allocator.
 
 If pages are allocated directly from the buddy allocator, the
 mm_page_alloc_zone_locked event is triggered. This event is important as high
-amounts of activity imply high activity on the zone->lock. Taking this lock
+amounts of activity imply high activity on the zone_lock. Taking this lock
 impairs performance by disabling interrupts, dirtying cache lines between
 CPUs and serialising many CPUs.
 
@@ -79,11 +79,11 @@ contention on the lruvec->lru_lock.
   mm_page_pcpu_drain		page=%p pfn=%lu order=%d cpu=%d migratetype=%d
 
 In front of the page allocator is a per-cpu page allocator. It exists only
-for order-0 pages, reduces contention on the zone->lock and reduces the
+for order-0 pages, reduces contention on the zone_lock and reduces the
 amount of writing on struct page.
 
 When a per-CPU list is empty or pages of the wrong type are allocated,
-the zone->lock will be taken once and the per-CPU list refilled. The event
+the zone_lock will be taken once and the per-CPU list refilled. The event
 triggered is mm_page_alloc_zone_locked for each page allocated with the
 event indicating whether it is for a percpu_refill or not.
 
@@ -92,7 +92,7 @@ which triggers a mm_page_pcpu_drain event.
 
 The individual nature of the events is so that pages can be tracked
 between allocation and freeing. A number of drain or refill pages that occur
-consecutively imply the zone->lock being taken once. Large amounts of per-CPU
+consecutively imply the zone_lock being taken once. Large amounts of per-CPU
 refills and drains could imply an imbalance between CPUs where too much work
 is being concentrated in one place. It could also indicate that the per-CPU
 lists should be a larger size. Finally, large amounts of refills on one CPU
-- 
2.47.3
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by SeongJae Park 4 weeks, 1 day ago
On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:

> On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> > 
> > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > > > This intentionally breaks direct users of zone->lock at compile time so
> > > > all call sites are converted to the zone lock wrappers. Without the
> > > > rename, present and future out-of-tree code could continue using
> > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > > > infrastructure.
> > > > 
> > > > No functional change intended.
> > > > 
> > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > > > Acked-by: SeongJae Park <sj@kernel.org>
> > > 
> > > I see some more instances of 'zone->lock' in comments in
> > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > > 
> > 
> > I fixed (most of) that in the previous version but my fix was lost.
> 
> Thanks for the fixups, Andrew.
> 
> I still see a few 'zone->lock' references in Documentation remain on
> mm-new. This patch cleans them up, as noted by Vlastimil.
> 
> I'm happy to adjust this patch if anything else needs attention.
> 
> From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> From: Dmitry Ilvokhin <d@ilvokhin.com>
> Date: Tue, 3 Mar 2026 06:13:13 -0800
> Subject: [PATCH] mm: fix remaining zone->lock references
> 
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> ---
>  Documentation/mm/physical_memory.rst | 4 ++--
>  Documentation/trace/events-kmem.rst  | 8 ++++----
>  2 files changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> index b76183545e5b..e344f93515b6 100644
> --- a/Documentation/mm/physical_memory.rst
> +++ b/Documentation/mm/physical_memory.rst
> @@ -500,11 +500,11 @@ General
>  ``nr_isolate_pageblock``
>    Number of isolated pageblocks. It is used to solve incorrect freepage counting
>    problem due to racy retrieving migratetype of pageblock. Protected by
> -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.

Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
for the rest of this patch.

I was initially thinking this was just a mistake, but I also found Andrew is
doing the same change [2], so I'm a bit confused.  Is this an intentional change?

[1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
[2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org


Thanks,
SJ

[...]
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Dmitry Ilvokhin 4 weeks, 1 day ago
On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> 
> > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> > > 
> > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > > > > This intentionally breaks direct users of zone->lock at compile time so
> > > > > all call sites are converted to the zone lock wrappers. Without the
> > > > > rename, present and future out-of-tree code could continue using
> > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > > > > infrastructure.
> > > > > 
> > > > > No functional change intended.
> > > > > 
> > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > > > > Acked-by: SeongJae Park <sj@kernel.org>
> > > > 
> > > > I see some more instances of 'zone->lock' in comments in
> > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > > > 
> > > 
> > > I fixed (most of) that in the previous version but my fix was lost.
> > 
> > Thanks for the fixups, Andrew.
> > 
> > I still see a few 'zone->lock' references in Documentation remain on
> > mm-new. This patch cleans them up, as noted by Vlastimil.
> > 
> > I'm happy to adjust this patch if anything else needs attention.
> > 
> > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> > From: Dmitry Ilvokhin <d@ilvokhin.com>
> > Date: Tue, 3 Mar 2026 06:13:13 -0800
> > Subject: [PATCH] mm: fix remaining zone->lock references
> > 
> > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > ---
> >  Documentation/mm/physical_memory.rst | 4 ++--
> >  Documentation/trace/events-kmem.rst  | 8 ++++----
> >  2 files changed, 6 insertions(+), 6 deletions(-)
> > 
> > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> > index b76183545e5b..e344f93515b6 100644
> > --- a/Documentation/mm/physical_memory.rst
> > +++ b/Documentation/mm/physical_memory.rst
> > @@ -500,11 +500,11 @@ General
> >  ``nr_isolate_pageblock``
> >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> >    problem due to racy retrieving migratetype of pageblock. Protected by
> > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> 
> Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> for the rest of this patch.
> 
> I was initially thinking this is just a mistake, but I also found Andrew is
> doing same change [2], so I'm bit confused.  Is this an intentional change?
> 
> [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> 

Good catch, thanks for pointing this out, SJ.

Originally the mechanical rename was indeed zone->lock -> zone->_lock.
However, in Documentation I intentionally switched references to
zone_lock instead of zone->_lock. The reasoning is that _lock is now an
internal implementation detail, and direct access is discouraged. The
intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
so referencing zone_lock in documentation felt more appropriate than
mentioning the private struct field (zone->_lock).

That said, I agree this creates inconsistency with the mechanical
rename, and I'm happy to adjust either way: either consistently refer
to the wrapper API, or keep documentation aligned with zone->_lock.

I slightly prefer referring to the wrapper API, but don't have a strong
preference as long as we're consistent.

> 
> Thanks,
> SJ
> 
> [...]
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by SeongJae Park 4 weeks, 1 day ago
On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:

> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > 
> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> > > > 
> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > > > > > This intentionally breaks direct users of zone->lock at compile time so
> > > > > > all call sites are converted to the zone lock wrappers. Without the
> > > > > > rename, present and future out-of-tree code could continue using
> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > > > > > infrastructure.
> > > > > > 
> > > > > > No functional change intended.
> > > > > > 
> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
> > > > > 
> > > > > I see some more instances of 'zone->lock' in comments in
> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > > > > 
> > > > 
> > > > I fixed (most of) that in the previous version but my fix was lost.
> > > 
> > > Thanks for the fixups, Andrew.
> > > 
> > > I still see a few 'zone->lock' references in Documentation remain on
> > > mm-new. This patch cleans them up, as noted by Vlastimil.
> > > 
> > > I'm happy to adjust this patch if anything else needs attention.
> > > 
> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
> > > Subject: [PATCH] mm: fix remaining zone->lock references
> > > 
> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > ---
> > >  Documentation/mm/physical_memory.rst | 4 ++--
> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
> > >  2 files changed, 6 insertions(+), 6 deletions(-)
> > > 
> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> > > index b76183545e5b..e344f93515b6 100644
> > > --- a/Documentation/mm/physical_memory.rst
> > > +++ b/Documentation/mm/physical_memory.rst
> > > @@ -500,11 +500,11 @@ General
> > >  ``nr_isolate_pageblock``
> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> > >    problem due to racy retrieving migratetype of pageblock. Protected by
> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > 
> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> > for the rest of this patch.
> > 
> > I was initially thinking this is just a mistake, but I also found Andrew is
> > doing same change [2], so I'm bit confused.  Is this an intentional change?
> > 
> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> > 
> 
> Good catch, thanks for pointing this out, SJ.
> 
> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
> However, in Documentation I intentionally switched references to
> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
> internal implementation detail, and direct access is discouraged. The
> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
> so referencing zone_lock in documentation felt more appropriate than
> mentioning the private struct field (zone->_lock).

Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
better.

But, I'm concerned that people like me might not be aware of the intention
behind 'zone_lock'.  If there is a well-known convention that allows people to know it
is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
humble opinion.  If there is such a convention but I'm just missing it, please
ignore.  If I'm not, for example,

"protected by ``zone->lock``" could be rewritten to
"protected by ``zone_[un]lock_*()`` locking helpers" or,
"protected by zone lock helper functions (``zone_[un]lock_*()``)" ?

> 
> That said, I agree this creates inconsistency with the mechanical
> rename, and I'm happy to adjust either way: either consistently refer
> to the wrapper API, or keep documentation aligned with zone->_lock.
> 
> I slightly prefer referring to the wrapper API, but don't have a strong
> preference as long as we're consistent.

I also think both approaches are good.  But for the wrapper approach, I think
giving readers more context rather than just ``zone_lock`` would be nice.


Thanks,
SJ

[...]
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Vlastimil Babka (SUSE) 4 weeks ago
On 3/4/26 16:13, SeongJae Park wrote:
> On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> 
>> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
>> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
>> > 
>> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
>> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
>> > > > 
>> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
>> > > > > > This intentionally breaks direct users of zone->lock at compile time so
>> > > > > > all call sites are converted to the zone lock wrappers. Without the
>> > > > > > rename, present and future out-of-tree code could continue using
>> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
>> > > > > > infrastructure.
>> > > > > > 
>> > > > > > No functional change intended.
>> > > > > > 
>> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
>> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
>> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
>> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
>> > > > > 
>> > > > > I see some more instances of 'zone->lock' in comments in
>> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
>> > > > > 
>> > > > 
>> > > > I fixed (most of) that in the previous version but my fix was lost.
>> > > 
>> > > Thanks for the fixups, Andrew.
>> > > 
>> > > I still see a few 'zone->lock' references in Documentation remain on
>> > > mm-new. This patch cleans them up, as noted by Vlastimil.
>> > > 
>> > > I'm happy to adjust this patch if anything else needs attention.
>> > > 
>> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
>> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
>> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
>> > > Subject: [PATCH] mm: fix remaining zone->lock references
>> > > 
>> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
>> > > ---
>> > >  Documentation/mm/physical_memory.rst | 4 ++--
>> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
>> > >  2 files changed, 6 insertions(+), 6 deletions(-)
>> > > 
>> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
>> > > index b76183545e5b..e344f93515b6 100644
>> > > --- a/Documentation/mm/physical_memory.rst
>> > > +++ b/Documentation/mm/physical_memory.rst
>> > > @@ -500,11 +500,11 @@ General
>> > >  ``nr_isolate_pageblock``
>> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
>> > >    problem due to racy retrieving migratetype of pageblock. Protected by
>> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
>> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
>> > 
>> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
>> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
>> > for the rest of this patch.
>> > 
>> > I was initially thinking this is just a mistake, but I also found Andrew is
>> > doing same change [2], so I'm bit confused.  Is this an intentional change?
>> > 
>> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
>> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
>> > 
>> 
>> Good catch, thanks for pointing this out, SJ.
>> 
>> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
>> However, in Documentation I intentionally switched references to
>> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
>> internal implementation detail, and direct access is discouraged. The
>> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
>> so referencing zone_lock in documentation felt more appropriate than
>> mentioning the private struct field (zone->_lock).
> 
> Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
> zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
> better.
> 
> But, I'm concerned if people like me might not aware the intention under
> 'zone_lock'.  If there is a well-known convention that allows people to know it
> is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
> humble opinion.  If there is such a convention but I'm just missing it, please
> ignore.  If I'm not, for eaxmaple,
> 
> "protected by ``zone->lock``" could be re-wrote to
> "protected by ``zone_[un]lock_*()`` locking helpers" or,
> "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?
> 
>> 
>> That said, I agree this creates inconsistency with the mechanical
>> rename, and I'm happy to adjust either way: either consistently refer
>> to the wrapper API, or keep documentation aligned with zone->_lock.
>> 
>> I slightly prefer referring to the wrapper API, but don't have a strong
>> preference as long as we're consistent.
> 
> I also think both approaches are good.  But for the wrapper approach, I think
> giving more contexts rather than just ``zone_lock`` to readers would be nice.

Grep tells me that we also have comments mentioning simply "zone lock", btw.
And it's also a term used often in informal conversations. Maybe we could
just standardize on that in comments/documentation as it's easier to read.
Discovering that the field is called _lock and that wrappers should be used
is hopefully not that difficult.
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Dmitry Ilvokhin 3 weeks, 6 days ago
On Thu, Mar 05, 2026 at 10:27:07AM +0100, Vlastimil Babka (SUSE) wrote:
> On 3/4/26 16:13, SeongJae Park wrote:
> > On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > 
> >> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> >> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> >> > 
> >> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> >> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> >> > > > 
> >> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> >> > > > > > This intentionally breaks direct users of zone->lock at compile time so
> >> > > > > > all call sites are converted to the zone lock wrappers. Without the
> >> > > > > > rename, present and future out-of-tree code could continue using
> >> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> >> > > > > > infrastructure.
> >> > > > > > 
> >> > > > > > No functional change intended.
> >> > > > > > 
> >> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> >> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> >> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
> >> > > > > 
> >> > > > > I see some more instances of 'zone->lock' in comments in
> >> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> >> > > > > 
> >> > > > 
> >> > > > I fixed (most of) that in the previous version but my fix was lost.
> >> > > 
> >> > > Thanks for the fixups, Andrew.
> >> > > 
> >> > > I still see a few 'zone->lock' references in Documentation remain on
> >> > > mm-new. This patch cleans them up, as noted by Vlastimil.
> >> > > 
> >> > > I'm happy to adjust this patch if anything else needs attention.
> >> > > 
> >> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> >> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
> >> > > Subject: [PATCH] mm: fix remaining zone->lock references
> >> > > 
> >> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > ---
> >> > >  Documentation/mm/physical_memory.rst | 4 ++--
> >> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
> >> > >  2 files changed, 6 insertions(+), 6 deletions(-)
> >> > > 
> >> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> >> > > index b76183545e5b..e344f93515b6 100644
> >> > > --- a/Documentation/mm/physical_memory.rst
> >> > > +++ b/Documentation/mm/physical_memory.rst
> >> > > @@ -500,11 +500,11 @@ General
> >> > >  ``nr_isolate_pageblock``
> >> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> >> > >    problem due to racy retrieving migratetype of pageblock. Protected by
> >> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> >> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> >> > 
> >> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> >> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> >> > for the rest of this patch.
> >> > 
> >> > I was initially thinking this is just a mistake, but I also found Andrew is
> >> > doing same change [2], so I'm bit confused.  Is this an intentional change?
> >> > 
> >> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> >> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> >> > 
> >> 
> >> Good catch, thanks for pointing this out, SJ.
> >> 
> >> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
> >> However, in Documentation I intentionally switched references to
> >> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
> >> internal implementation detail, and direct access is discouraged. The
> >> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
> >> so referencing zone_lock in documentation felt more appropriate than
> >> mentioning the private struct field (zone->_lock).
> > 
> > Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
> > zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
> > better.
> > 
> > But, I'm concerned if people like me might not aware the intention under
> > 'zone_lock'.  If there is a well-known convention that allows people to know it
> > is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
> > humble opinion.  If there is such a convention but I'm just missing it, please
> > ignore.  If I'm not, for eaxmaple,
> > 
> > "protected by ``zone->lock``" could be re-wrote to
> > "protected by ``zone_[un]lock_*()`` locking helpers" or,
> > "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?
> > 
> >> 
> >> That said, I agree this creates inconsistency with the mechanical
> >> rename, and I'm happy to adjust either way: either consistently refer
> >> to the wrapper API, or keep documentation aligned with zone->_lock.
> >> 
> >> I slightly prefer referring to the wrapper API, but don't have a strong
> >> preference as long as we're consistent.
> > 
> > I also think both approaches are good.  But for the wrapper approach, I think
> > giving more contexts rather than just ``zone_lock`` to readers would be nice.
> 
> Grep tells me that we also have comments mentioning simply "zone lock", btw.
> And it's also a term used often in informal conversations. Maybe we could
> just standardize on that in comments/documentations as it's easier to read.
> Discovering that the field is called _lock and that wrappers should be used,
> is hopefully not that difficult.

Thanks for the suggestion, Vlastimil. That sounds reasonable to me as
well. I'll update the comments and documentation to consistently use
"zone lock".
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Dmitry Ilvokhin 3 weeks, 6 days ago
On Thu, Mar 05, 2026 at 06:16:26PM +0000, Dmitry Ilvokhin wrote:
> On Thu, Mar 05, 2026 at 10:27:07AM +0100, Vlastimil Babka (SUSE) wrote:
> > On 3/4/26 16:13, SeongJae Park wrote:
> > > On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > > 
> > >> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> > >> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > >> > 
> > >> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> > >> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> > >> > > > 
> > >> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > >> > > > > > This intentionally breaks direct users of zone->lock at compile time so
> > >> > > > > > all call sites are converted to the zone lock wrappers. Without the
> > >> > > > > > rename, present and future out-of-tree code could continue using
> > >> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > >> > > > > > infrastructure.
> > >> > > > > > 
> > >> > > > > > No functional change intended.
> > >> > > > > > 
> > >> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > >> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > >> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > >> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
> > >> > > > > 
> > >> > > > > I see some more instances of 'zone->lock' in comments in
> > >> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > >> > > > > 
> > >> > > > 
> > >> > > > I fixed (most of) that in the previous version but my fix was lost.
> > >> > > 
> > >> > > Thanks for the fixups, Andrew.
> > >> > > 
> > >> > > I still see a few 'zone->lock' references in Documentation remain on
> > >> > > mm-new. This patch cleans them up, as noted by Vlastimil.
> > >> > > 
> > >> > > I'm happy to adjust this patch if anything else needs attention.
> > >> > > 
> > >> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> > >> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
> > >> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
> > >> > > Subject: [PATCH] mm: fix remaining zone->lock references
> > >> > > 
> > >> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > >> > > ---
> > >> > >  Documentation/mm/physical_memory.rst | 4 ++--
> > >> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
> > >> > >  2 files changed, 6 insertions(+), 6 deletions(-)
> > >> > > 
> > >> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> > >> > > index b76183545e5b..e344f93515b6 100644
> > >> > > --- a/Documentation/mm/physical_memory.rst
> > >> > > +++ b/Documentation/mm/physical_memory.rst
> > >> > > @@ -500,11 +500,11 @@ General
> > >> > >  ``nr_isolate_pageblock``
> > >> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> > >> > >    problem due to racy retrieving migratetype of pageblock. Protected by
> > >> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > >> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > >> > 
> > >> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> > >> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> > >> > for the rest of this patch.
> > >> > 
> > >> > I was initially thinking this is just a mistake, but I also found Andrew is
> > >> > doing same change [2], so I'm bit confused.  Is this an intentional change?
> > >> > 
> > >> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> > >> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> > >> > 
> > >> 
> > >> Good catch, thanks for pointing this out, SJ.
> > >> 
> > >> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
> > >> However, in Documentation I intentionally switched references to
> > >> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
> > >> internal implementation detail, and direct access is discouraged. The
> > >> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
> > >> so referencing zone_lock in documentation felt more appropriate than
> > >> mentioning the private struct field (zone->_lock).
> > > 
> > > Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
> > > zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
> > > better.
> > > 
> > > But, I'm concerned if people like me might not aware the intention under
> > > 'zone_lock'.  If there is a well-known convention that allows people to know it
> > > is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
> > > humble opinion.  If there is such a convention but I'm just missing it, please
> > > ignore.  If I'm not, for eaxmaple,
> > > 
> > > "protected by ``zone->lock``" could be re-wrote to
> > > "protected by ``zone_[un]lock_*()`` locking helpers" or,
> > > "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?
> > > 
> > >> 
> > >> That said, I agree this creates inconsistency with the mechanical
> > >> rename, and I'm happy to adjust either way: either consistently refer
> > >> to the wrapper API, or keep documentation aligned with zone->_lock.
> > >> 
> > >> I slightly prefer referring to the wrapper API, but don't have a strong
> > >> preference as long as we're consistent.
> > > 
> > > I also think both approaches are good.  But for the wrapper approach, I think
> > > giving more contexts rather than just ``zone_lock`` to readers would be nice.
> > 
> > Grep tells me that we also have comments mentioning simply "zone lock", btw.
> > And it's also a term used often in informal conversations. Maybe we could
> > just standardize on that in comments/documentations as it's easier to read.
> > Discovering that the field is called _lock and that wrappers should be used,
> > is hopefully not that difficult.
> 
> Thanks for the suggestion, Vlastimil. That sounds reasonable to me as
> well. I'll update the comments and documentation to consistently use
> "zone lock".

Following the suggestion from SJ and Vlastimil, I prepared a fixup to
standardize documentation and comments on the term "zone lock".

The patch is based on top of the current mm-new.

Andrew, please let me know if you would prefer a respin of the series
instead.

From 267cda3e0e160f97b346009bc48819bfeed92e52 Mon Sep 17 00:00:00 2001
From: Dmitry Ilvokhin <d@ilvokhin.com>
Date: Thu, 5 Mar 2026 10:36:17 -0800
Subject: [PATCH] mm: documentation: standardize on "zone lock" terminology

During review of the zone lock tracing series it was suggested to
standardize documentation and comments on the term "zone lock"
instead of using zone_lock or referring to the internal field
zone->_lock.

Update references accordingly.

Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
---
 Documentation/mm/physical_memory.rst |  4 ++--
 Documentation/trace/events-kmem.rst  |  8 ++++----
 mm/compaction.c                      |  2 +-
 mm/internal.h                        |  2 +-
 mm/page_alloc.c                      | 12 ++++++------
 mm/page_isolation.c                  |  4 ++--
 mm/page_owner.c                      |  2 +-
 7 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
index e344f93515b6..2398d87ac156 100644
--- a/Documentation/mm/physical_memory.rst
+++ b/Documentation/mm/physical_memory.rst
@@ -500,11 +500,11 @@ General
 ``nr_isolate_pageblock``
   Number of isolated pageblocks. It is used to solve incorrect freepage counting
   problem due to racy retrieving migratetype of pageblock. Protected by
-  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
+  zone lock. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
 
 ``span_seqlock``
   The seqlock to protect ``zone_start_pfn`` and ``spanned_pages``. It is a
-  seqlock because it has to be read outside of ``zone_lock``, and it is done in
+  seqlock because it has to be read outside of zone lock, and it is done in
   the main allocator path. However, the seqlock is written quite infrequently.
   Defined only when ``CONFIG_MEMORY_HOTPLUG`` is enabled.
 
diff --git a/Documentation/trace/events-kmem.rst b/Documentation/trace/events-kmem.rst
index 3c20a972de27..42f08f3b136c 100644
--- a/Documentation/trace/events-kmem.rst
+++ b/Documentation/trace/events-kmem.rst
@@ -57,7 +57,7 @@ the per-CPU allocator (high performance) or the buddy allocator.
 
 If pages are allocated directly from the buddy allocator, the
 mm_page_alloc_zone_locked event is triggered. This event is important as high
-amounts of activity imply high activity on the zone_lock. Taking this lock
+amounts of activity imply high activity on the zone lock. Taking this lock
 impairs performance by disabling interrupts, dirtying cache lines between
 CPUs and serialising many CPUs.
 
@@ -79,11 +79,11 @@ contention on the lruvec->lru_lock.
   mm_page_pcpu_drain		page=%p pfn=%lu order=%d cpu=%d migratetype=%d
 
 In front of the page allocator is a per-cpu page allocator. It exists only
-for order-0 pages, reduces contention on the zone_lock and reduces the
+for order-0 pages, reduces contention on the zone lock and reduces the
 amount of writing on struct page.
 
 When a per-CPU list is empty or pages of the wrong type are allocated,
-the zone_lock will be taken once and the per-CPU list refilled. The event
+the zone lock will be taken once and the per-CPU list refilled. The event
 triggered is mm_page_alloc_zone_locked for each page allocated with the
 event indicating whether it is for a percpu_refill or not.
 
@@ -92,7 +92,7 @@ which triggers a mm_page_pcpu_drain event.
 
 The individual nature of the events is so that pages can be tracked
 between allocation and freeing. A number of drain or refill pages that occur
-consecutively imply the zone_lock being taken once. Large amounts of per-CPU
+consecutively imply the zone lock being taken once. Large amounts of per-CPU
 refills and drains could imply an imbalance between CPUs where too much work
 is being concentrated in one place. It could also indicate that the per-CPU
 lists should be a larger size. Finally, large amounts of refills on one CPU
diff --git a/mm/compaction.c b/mm/compaction.c
index 143ead2cb10a..32623894a632 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1419,7 +1419,7 @@ static bool suitable_migration_target(struct compact_control *cc,
 		int order = cc->order > 0 ? cc->order : pageblock_order;
 
 		/*
-		 * We are checking page_order without zone->_lock taken. But
+		 * We are checking page_order without zone lock taken. But
 		 * the only small danger is that we skip a potentially suitable
 		 * pageblock, so it's not worth to check order for valid range.
 		 */
diff --git a/mm/internal.h b/mm/internal.h
index f634ac469c87..95b583e7e4f7 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -727,7 +727,7 @@ static inline unsigned int buddy_order(struct page *page)
  * (d) a page and its buddy are in the same zone.
  *
  * For recording whether a page is in the buddy system, we set PageBuddy.
- * Setting, clearing, and testing PageBuddy is serialized by zone->_lock.
+ * Setting, clearing, and testing PageBuddy is serialized by zone lock.
  *
  * For recording page's order, we use page_private(page).
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4c95364b7063..75ee81445640 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2440,7 +2440,7 @@ enum rmqueue_mode {
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
- * Call me with the zone->_lock already held.
+ * Call me with the zone lock already held.
  */
 static __always_inline struct page *
 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
@@ -2468,7 +2468,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 	 * fallbacks modes with increasing levels of fragmentation risk.
 	 *
 	 * The fallback logic is expensive and rmqueue_bulk() calls in
-	 * a loop with the zone->_lock held, meaning the freelists are
+	 * a loop with the zone lock held, meaning the freelists are
 	 * not subject to any outside changes. Remember in *mode where
 	 * we found pay dirt, to save us the search on the next call.
 	 */
@@ -7046,7 +7046,7 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
 	 * pages.  Because of this, we reserve the bigger range and
 	 * once this is done free the pages we are not interested in.
 	 *
-	 * We don't have to hold zone->_lock here because the pages are
+	 * We don't have to hold zone lock here because the pages are
 	 * isolated thus they won't get removed from buddy.
 	 */
 	outer_start = find_large_buddy(start);
@@ -7615,7 +7615,7 @@ void accept_page(struct page *page)
 		return;
 	}
 
-	/* Unlocks zone->_lock */
+	/* Unlocks zone lock */
 	__accept_page(zone, &flags, page);
 }
 
@@ -7632,7 +7632,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 		return false;
 	}
 
-	/* Unlocks zone->_lock */
+	/* Unlocks zone lock */
 	__accept_page(zone, &flags, page);
 
 	return true;
@@ -7773,7 +7773,7 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned
 
 	/*
 	 * Best effort allocation from percpu free list.
-	 * If it's empty attempt to spin_trylock zone->_lock.
+	 * If it's empty attempt to spin_trylock zone lock.
 	 */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index cf731370e7a7..e8414e9a718a 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -212,7 +212,7 @@ static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
 	zone_unlock_irqrestore(zone, flags);
 	if (mode == PB_ISOLATE_MODE_MEM_OFFLINE) {
 		/*
-		 * printk() with zone->_lock held will likely trigger a
+		 * printk() with zone lock held will likely trigger a
 		 * lockdep splat, so defer it here.
 		 */
 		dump_page(unmovable, "unmovable page");
@@ -553,7 +553,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * Test all pages in the range is free(means isolated) or not.
  * all pages in [start_pfn...end_pfn) must be in the same zone.
- * zone->_lock must be held before call this.
+ * zone lock must be held before call this.
  *
  * Returns the last tested pfn.
  */
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 54a4ba63b14f..109f2f28f5b1 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -799,7 +799,7 @@ static void init_pages_in_zone(struct zone *zone)
 				continue;
 
 			/*
-			 * To avoid having to grab zone->_lock, be a little
+			 * To avoid having to grab zone lock, be a little
 			 * careful when reading buddy page order. The only
 			 * danger is that we skip too much and potentially miss
 			 * some early allocated pages, which is better than
-- 
2.47.3
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Vlastimil Babka (SUSE) 3 weeks, 6 days ago
On 3/5/26 19:59, Dmitry Ilvokhin wrote:
> On Thu, Mar 05, 2026 at 06:16:26PM +0000, Dmitry Ilvokhin wrote:
>> On Thu, Mar 05, 2026 at 10:27:07AM +0100, Vlastimil Babka (SUSE) wrote:
>> > On 3/4/26 16:13, SeongJae Park wrote:
>> > > On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
>> > > 
>> > >> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
>> > >> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
>> > >> > 
>> > >> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
>> > >> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
>> > >> > > > 
>> > >> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
>> > >> > > > > > This intentionally breaks direct users of zone->lock at compile time so
>> > >> > > > > > all call sites are converted to the zone lock wrappers. Without the
>> > >> > > > > > rename, present and future out-of-tree code could continue using
>> > >> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
>> > >> > > > > > infrastructure.
>> > >> > > > > > 
>> > >> > > > > > No functional change intended.
>> > >> > > > > > 
>> > >> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
>> > >> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
>> > >> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
>> > >> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
>> > >> > > > > 
>> > >> > > > > I see some more instances of 'zone->lock' in comments in
>> > >> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
>> > >> > > > > 
>> > >> > > > 
>> > >> > > > I fixed (most of) that in the previous version but my fix was lost.
>> > >> > > 
>> > >> > > Thanks for the fixups, Andrew.
>> > >> > > 
>> > >> > > I still see a few 'zone->lock' references in Documentation remain on
>> > >> > > mm-new. This patch cleans them up, as noted by Vlastimil.
>> > >> > > 
>> > >> > > I'm happy to adjust this patch if anything else needs attention.
>> > >> > > 
>> > >> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
>> > >> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
>> > >> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
>> > >> > > Subject: [PATCH] mm: fix remaining zone->lock references
>> > >> > > 
>> > >> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
>> > >> > > ---
>> > >> > >  Documentation/mm/physical_memory.rst | 4 ++--
>> > >> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
>> > >> > >  2 files changed, 6 insertions(+), 6 deletions(-)
>> > >> > > 
>> > >> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
>> > >> > > index b76183545e5b..e344f93515b6 100644
>> > >> > > --- a/Documentation/mm/physical_memory.rst
>> > >> > > +++ b/Documentation/mm/physical_memory.rst
>> > >> > > @@ -500,11 +500,11 @@ General
>> > >> > >  ``nr_isolate_pageblock``
>> > >> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
>> > >> > >    problem due to racy retrieving migratetype of pageblock. Protected by
>> > >> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
>> > >> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
>> > >> > 
>> > >> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
>> > >> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
>> > >> > for the rest of this patch.
>> > >> > 
>> > >> > I was initially thinking this is just a mistake, but I also found Andrew is
>> > > doing the same change [2], so I'm a bit confused.  Is this an intentional change?
>> > >> > 
>> > >> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
>> > >> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
>> > >> > 
>> > >> 
>> > >> Good catch, thanks for pointing this out, SJ.
>> > >> 
>> > >> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
>> > >> However, in Documentation I intentionally switched references to
>> > >> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
>> > >> internal implementation detail, and direct access is discouraged. The
>> > >> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
>> > >> so referencing zone_lock in documentation felt more appropriate than
>> > >> mentioning the private struct field (zone->_lock).
>> > > 
>> > > Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
>> > > zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
>> > > better.
>> > > 
>> > > But, I'm concerned that people like me might not be aware of the intention behind
>> > > 'zone_lock'.  If there is a well-known convention that allows people to know it
>> > > is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
>> > > humble opinion.  If there is such a convention but I'm just missing it, please
>> > > ignore.  If I'm not, for example,
>> > > 
>> > > "protected by ``zone->lock``" could be rewritten to
>> > > "protected by ``zone_[un]lock_*()`` locking helpers" or,
>> > > "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?
>> > > 
>> > >> 
>> > >> That said, I agree this creates inconsistency with the mechanical
>> > >> rename, and I'm happy to adjust either way: either consistently refer
>> > >> to the wrapper API, or keep documentation aligned with zone->_lock.
>> > >> 
>> > >> I slightly prefer referring to the wrapper API, but don't have a strong
>> > >> preference as long as we're consistent.
>> > > 
>> > > I also think both approaches are good.  But for the wrapper approach, I think
>> > > giving more contexts rather than just ``zone_lock`` to readers would be nice.
>> > 
>> > Grep tells me that we also have comments mentioning simply "zone lock", btw.
>> > And it's also a term used often in informal conversations. Maybe we could
>> > just standardize on that in comments/documentations as it's easier to read.
>> > Discovering that the field is called _lock and that wrappers should be used,
>> > is hopefully not that difficult.
>> 
>> Thanks for the suggestion, Vlastimil. That sounds reasonable to me as
>> well. I'll update the comments and documentation to consistently use
>> "zone lock".
> 
> Following the suggestion from SJ and Vlastimil, I prepared fixup to
> standardize documentation and comments on the term "zone lock".
> 
> The patch is based on top of the current mm-new.
> 
> Andrew, please let me know if you would prefer a respin of the series
> instead.
> 
> From 267cda3e0e160f97b346009bc48819bfeed92e52 Mon Sep 17 00:00:00 2001
> From: Dmitry Ilvokhin <d@ilvokhin.com>
> Date: Thu, 5 Mar 2026 10:36:17 -0800
> Subject: [PATCH] mm: documentation: standardize on "zone lock" terminology
> 
> During review of the zone lock tracing series it was suggested to
> standardize documentation and comments on the term "zone lock"
> instead of using zone_lock or referring to the internal field
> zone->_lock.
> 
> Update references accordingly.
> 
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>

Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>

Thanks!
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by SeongJae Park 3 weeks, 6 days ago
On Thu, 5 Mar 2026 18:59:43 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
[...]
> Following the suggestion from SJ and Vlastimil, I prepared fixup to
> standardize documentation and comments on the term "zone lock".
> 
> The patch is based on top of the current mm-new.
> 
> Andrew, please let me know if you would prefer a respin of the series
> instead.
> 
> From 267cda3e0e160f97b346009bc48819bfeed92e52 Mon Sep 17 00:00:00 2001
> From: Dmitry Ilvokhin <d@ilvokhin.com>
> Date: Thu, 5 Mar 2026 10:36:17 -0800
> Subject: [PATCH] mm: documentation: standardize on "zone lock" terminology
> 
> During review of the zone lock tracing series it was suggested to
> standardize documentation and comments on the term "zone lock"
> instead of using zone_lock or referring to the internal field
> zone->_lock.
> 
> Update references accordingly.
> 
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>

Acked-by: SeongJae Park <sj@kernel.org>


Thanks,
SJ

[...]
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by SeongJae Park 4 weeks ago
On Thu, 5 Mar 2026 10:27:07 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:

> On 3/4/26 16:13, SeongJae Park wrote:
> > On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > 
> >> On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> >> > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> >> > 
> >> > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> >> > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> >> > > > 
> >> > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> >> > > > > > This intentionally breaks direct users of zone->lock at compile time so
> >> > > > > > all call sites are converted to the zone lock wrappers. Without the
> >> > > > > > rename, present and future out-of-tree code could continue using
> >> > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> >> > > > > > infrastructure.
> >> > > > > > 
> >> > > > > > No functional change intended.
> >> > > > > > 
> >> > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> >> > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> >> > > > > > Acked-by: SeongJae Park <sj@kernel.org>
> >> > > > > 
> >> > > > > I see some more instances of 'zone->lock' in comments in
> >> > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> >> > > > > 
> >> > > > 
> >> > > > I fixed (most of) that in the previous version but my fix was lost.
> >> > > 
> >> > > Thanks for the fixups, Andrew.
> >> > > 
> >> > > I still see a few 'zone->lock' references in Documentation remain on
> >> > > mm-new. This patch cleans them up, as noted by Vlastimil.
> >> > > 
> >> > > I'm happy to adjust this patch if anything else needs attention.
> >> > > 
> >> > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> >> > > From: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > Date: Tue, 3 Mar 2026 06:13:13 -0800
> >> > > Subject: [PATCH] mm: fix remaining zone->lock references
> >> > > 
> >> > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> >> > > ---
> >> > >  Documentation/mm/physical_memory.rst | 4 ++--
> >> > >  Documentation/trace/events-kmem.rst  | 8 ++++----
> >> > >  2 files changed, 6 insertions(+), 6 deletions(-)
> >> > > 
> >> > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> >> > > index b76183545e5b..e344f93515b6 100644
> >> > > --- a/Documentation/mm/physical_memory.rst
> >> > > +++ b/Documentation/mm/physical_memory.rst
> >> > > @@ -500,11 +500,11 @@ General
> >> > >  ``nr_isolate_pageblock``
> >> > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> >> > >    problem due to racy retrieving migratetype of pageblock. Protected by
> >> > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> >> > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> >> > 
> >> > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> >> > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> >> > for the rest of this patch.
> >> > 
> >> > I was initially thinking this is just a mistake, but I also found Andrew is
> >> > doing the same change [2], so I'm a bit confused.  Is this an intentional change?
> >> > 
> >> > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> >> > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> >> > 
> >> 
> >> Good catch, thanks for pointing this out, SJ.
> >> 
> >> Originally the mechanical rename was indeed zone->lock -> zone->_lock.
> >> However, in Documentation I intentionally switched references to
> >> zone_lock instead of zone->_lock. The reasoning is that _lock is now an
> >> internal implementation detail, and direct access is discouraged. The
> >> intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
> >> so referencing zone_lock in documentation felt more appropriate than
> >> mentioning the private struct field (zone->_lock).
> > 
> > Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
> > zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
> > better.
> > 
> > But, I'm concerned that people like me might not be aware of the intention behind
> > 'zone_lock'.  If there is a well-known convention that allows people to know it
> > is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
> > humble opinion.  If there is such a convention but I'm just missing it, please
> > ignore.  If I'm not, for example,
> > 
> > "protected by ``zone->lock``" could be rewritten to
> > "protected by ``zone_[un]lock_*()`` locking helpers" or,
> > "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?
> > 
> >> 
> >> That said, I agree this creates inconsistency with the mechanical
> >> rename, and I'm happy to adjust either way: either consistently refer
> >> to the wrapper API, or keep documentation aligned with zone->_lock.
> >> 
> >> I slightly prefer referring to the wrapper API, but don't have a strong
> >> preference as long as we're consistent.
> > 
> > I also think both approaches are good.  But for the wrapper approach, I think
> > giving more contexts rather than just ``zone_lock`` to readers would be nice.
> 
> Grep tells me that we also have comments mentioning simply "zone lock", btw.
> And it's also a term used often in informal conversations. Maybe we could
> just standardize on that in comments/documentations as it's easier to read.
> Discovering that the field is called _lock and that wrappers should be used,
> is hopefully not that difficult.

Sounds good, that also works for me.


Thanks,
SJ
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by SeongJae Park 4 weeks, 1 day ago
On Wed,  4 Mar 2026 07:13:34 -0800 SeongJae Park <sj@kernel.org> wrote:

> On Wed, 4 Mar 2026 13:01:45 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> 
> > On Tue, Mar 03, 2026 at 05:50:34PM -0800, SeongJae Park wrote:
> > > On Tue, 3 Mar 2026 14:25:55 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:
> > > 
> > > > On Mon, Mar 02, 2026 at 02:37:43PM -0800, Andrew Morton wrote:
> > > > > On Mon, 2 Mar 2026 15:10:03 +0100 "Vlastimil Babka (SUSE)" <vbabka@kernel.org> wrote:
> > > > > 
> > > > > > On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> > > > > > > This intentionally breaks direct users of zone->lock at compile time so
> > > > > > > all call sites are converted to the zone lock wrappers. Without the
> > > > > > > rename, present and future out-of-tree code could continue using
> > > > > > > spin_lock(&zone->lock) and bypass the wrappers and tracing
> > > > > > > infrastructure.
> > > > > > > 
> > > > > > > No functional change intended.
> > > > > > > 
> > > > > > > Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> > > > > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > > > > Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> > > > > > > Acked-by: SeongJae Park <sj@kernel.org>
> > > > > > 
> > > > > > I see some more instances of 'zone->lock' in comments in
> > > > > > include/linux/mmzone.h and under Documentation/ but otherwise LGTM.
> > > > > > 
> > > > > 
> > > > > I fixed (most of) that in the previous version but my fix was lost.
> > > > 
> > > > Thanks for the fixups, Andrew.
> > > > 
> > > > I still see a few 'zone->lock' references in Documentation remain on
> > > > mm-new. This patch cleans them up, as noted by Vlastimil.
> > > > 
> > > > I'm happy to adjust this patch if anything else needs attention.
> > > > 
> > > > From 9142d5a8b60038fa424a6033253960682e5a51f4 Mon Sep 17 00:00:00 2001
> > > > From: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > Date: Tue, 3 Mar 2026 06:13:13 -0800
> > > > Subject: [PATCH] mm: fix remaining zone->lock references
> > > > 
> > > > Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> > > > ---
> > > >  Documentation/mm/physical_memory.rst | 4 ++--
> > > >  Documentation/trace/events-kmem.rst  | 8 ++++----
> > > >  2 files changed, 6 insertions(+), 6 deletions(-)
> > > > 
> > > > diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst
> > > > index b76183545e5b..e344f93515b6 100644
> > > > --- a/Documentation/mm/physical_memory.rst
> > > > +++ b/Documentation/mm/physical_memory.rst
> > > > @@ -500,11 +500,11 @@ General
> > > >  ``nr_isolate_pageblock``
> > > >    Number of isolated pageblocks. It is used to solve incorrect freepage counting
> > > >    problem due to racy retrieving migratetype of pageblock. Protected by
> > > > -  ``zone->lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > > > +  ``zone_lock``. Defined only when ``CONFIG_MEMORY_ISOLATION`` is enabled.
> > > 
> > > Dmitry's original patch [1] was doing 's/zone->lock/zone->_lock/', which aligns
> > > to my expectation.  But this patch is doing 's/zone->lock/zone_lock/'.  Same
> > > for the rest of this patch.
> > > 
> > > I was initially thinking this is just a mistake, but I also found Andrew is
> > > doing the same change [2], so I'm a bit confused.  Is this an intentional change?
> > > 
> > > [1] https://lore.kernel.org/d61500c5784c64e971f4d328c57639303c475f81.1772206930.git.d@ilvokhin.com
> > > [2] https://lore.kernel.org/20260302143743.220eed4feb36d7572fe726cc@linux-foundation.org
> > > 
> > 
> > Good catch, thanks for pointing this out, SJ.
> > 
> > Originally the mechanical rename was indeed zone->lock -> zone->_lock.
> > However, in Documentation I intentionally switched references to
> > zone_lock instead of zone->_lock. The reasoning is that _lock is now an
> > internal implementation detail, and direct access is discouraged. The
> > intended interface is via the zone_lock_*() / zone_unlock_*() wrappers,
> > so referencing zone_lock in documentation felt more appropriate than
> > mentioning the private struct field (zone->_lock).
> 
> Thank you for this nice and kind clarification, Dmitry!  I agree mentioning
> zone_[un]lock_*() helpers instead of the hidden member (zone->_lock) can be
> better.
> 
> But, I'm concerned that people like me might not be aware of the intention behind
> 'zone_lock'.  If there is a well-known convention that allows people to know it
> is for 'zone_[un]lock_*()' helpers, making it more clear would be nice, in my
> humble opinion.  If there is such a convention but I'm just missing it, please
> ignore.  If I'm not, for example,
> 
> "protected by ``zone->lock``" could be rewritten to
> "protected by ``zone_[un]lock_*()`` locking helpers" or,
> "protected by zone lock helper functions (``zone_[un]lock_*()``)" ?

Maybe too verbose, and people who are not used to regex might be confused.
Mentioning mmzone_lock.h might be better?  E.g.,

    protected by functions in mmzone_lock.h

> 
> > 
> > That said, I agree this creates inconsistency with the mechanical
> > rename, and I'm happy to adjust either way: either consistently refer
> > to the wrapper API, or keep documentation aligned with zone->_lock.
> > 
> > I slightly prefer referring to the wrapper API, but don't have a strong
> > preference as long as we're consistent.
> 
> I also think both approaches are good.  But for the wrapper approach, I think
> giving more contexts rather than just ``zone_lock`` to readers would be nice.


Thanks,
SJ

[...]
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by Zi Yan 1 month ago
On 27 Feb 2026, at 11:00, Dmitry Ilvokhin wrote:

> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
>
> No functional change intended.
>
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> Acked-by: SeongJae Park <sj@kernel.org>
> ---
>  include/linux/mmzone.h      |  7 +++++--
>  include/linux/mmzone_lock.h | 12 ++++++------
>  mm/compaction.c             |  4 ++--
>  mm/internal.h               |  2 +-
>  mm/page_alloc.c             | 16 ++++++++--------
>  mm/page_isolation.c         |  4 ++--
>  mm/page_owner.c             |  2 +-
>  7 files changed, 25 insertions(+), 22 deletions(-)
>
Acked-by: Zi Yan <ziy@nvidia.com>

Best Regards,
Yan, Zi
Re: [PATCH v4 4/5] mm: rename zone->lock to zone->_lock
Posted by David Hildenbrand (Arm) 1 month ago
On 2/27/26 17:00, Dmitry Ilvokhin wrote:
> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
> 
> No functional change intended.
> 
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
> Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
> Acked-by: SeongJae Park <sj@kernel.org>
> ---

Acked-by: David Hildenbrand (Arm) <david@kernel.org>

-- 
Cheers,

David