This intentionally breaks direct users of zone->lock at compile time so
all call sites are converted to the zone lock wrappers. Without the
rename, present and future out-of-tree code could continue using
spin_lock(&zone->lock) and bypass the wrappers and tracing
infrastructure.
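
For example, a direct lock user converts to the wrappers (as defined in
include/linux/zone_lock.h in this series) like this:

	/* Before: direct access to the spinlock */
	spin_lock_irqsave(&zone->lock, flags);
	...
	spin_unlock_irqrestore(&zone->lock, flags);

	/* After: all access funnels through the wrappers */
	zone_lock_irqsave(zone, flags);
	...
	zone_unlock_irqrestore(zone, flags);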
No functional change intended.
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>
---
include/linux/mmzone.h | 7 +++++--
include/linux/zone_lock.h | 12 ++++++------
mm/compaction.c | 4 ++--
mm/internal.h | 2 +-
mm/page_alloc.c | 16 ++++++++--------
mm/page_isolation.c | 4 ++--
mm/page_owner.c | 2 +-
7 files changed, 25 insertions(+), 22 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3e51190a55e4..32bca655fce5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1009,8 +1009,11 @@ struct zone {
/* zone flags, see below */
unsigned long flags;
- /* Primarily protects free_area */
- spinlock_t lock;
+ /*
+ * Primarily protects free_area. Should be accessed via zone_lock_*
+ * helpers.
+ */
+ spinlock_t _lock;
/* Pages to be freed when next trylock succeeds */
struct llist_head trylock_free_pages;
diff --git a/include/linux/zone_lock.h b/include/linux/zone_lock.h
index c531e26280e6..5ce1aa38d500 100644
--- a/include/linux/zone_lock.h
+++ b/include/linux/zone_lock.h
@@ -7,32 +7,32 @@
static inline void zone_lock_init(struct zone *zone)
{
- spin_lock_init(&zone->lock);
+ spin_lock_init(&zone->_lock);
}
#define zone_lock_irqsave(zone, flags) \
do { \
- spin_lock_irqsave(&(zone)->lock, flags); \
+ spin_lock_irqsave(&(zone)->_lock, flags); \
} while (0)
#define zone_trylock_irqsave(zone, flags) \
({ \
- spin_trylock_irqsave(&(zone)->lock, flags); \
+ spin_trylock_irqsave(&(zone)->_lock, flags); \
})
static inline void zone_unlock_irqrestore(struct zone *zone, unsigned long flags)
{
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock_irqrestore(&zone->_lock, flags);
}
static inline void zone_lock_irq(struct zone *zone)
{
- spin_lock_irq(&zone->lock);
+ spin_lock_irq(&zone->_lock);
}
static inline void zone_unlock_irq(struct zone *zone)
{
- spin_unlock_irq(&zone->lock);
+ spin_unlock_irq(&zone->_lock);
}
#endif /* _LINUX_ZONE_LOCK_H */
diff --git a/mm/compaction.c b/mm/compaction.c
index 9f7997e827bd..aed5bf468fd3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -506,7 +506,7 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page)
static bool compact_zone_lock_irqsave(struct zone *zone,
unsigned long *flags,
struct compact_control *cc)
-__acquires(&zone->lock)
+__acquires(&zone->_lock)
{
/* Track if the lock is contended in async mode */
if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
@@ -1402,7 +1402,7 @@ static bool suitable_migration_target(struct compact_control *cc,
int order = cc->order > 0 ? cc->order : pageblock_order;
/*
- * We are checking page_order without zone->lock taken. But
+ * We are checking page_order without zone->_lock taken. But
* the only small danger is that we skip a potentially suitable
* pageblock, so it's not worth to check order for valid range.
*/
diff --git a/mm/internal.h b/mm/internal.h
index cb0af847d7d9..6cb06e21ce15 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -710,7 +710,7 @@ static inline unsigned int buddy_order(struct page *page)
* (d) a page and its buddy are in the same zone.
*
* For recording whether a page is in the buddy system, we set PageBuddy.
- * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
+ * Setting, clearing, and testing PageBuddy is serialized by zone->_lock.
*
* For recording page's order, we use page_private(page).
*/
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5d13fe9b79f..56ca27a07a62 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -815,7 +815,7 @@ compaction_capture(struct capture_control *capc, struct page *page,
static inline void account_freepages(struct zone *zone, int nr_pages,
int migratetype)
{
- lockdep_assert_held(&zone->lock);
+ lockdep_assert_held(&zone->_lock);
if (is_migrate_isolate(migratetype))
return;
@@ -2473,7 +2473,7 @@ enum rmqueue_mode {
/*
* Do the hard work of removing an element from the buddy allocator.
- * Call me with the zone->lock already held.
+ * Call me with the zone->_lock already held.
*/
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
@@ -2501,7 +2501,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
* fallbacks modes with increasing levels of fragmentation risk.
*
* The fallback logic is expensive and rmqueue_bulk() calls in
- * a loop with the zone->lock held, meaning the freelists are
+ * a loop with the zone->_lock held, meaning the freelists are
* not subject to any outside changes. Remember in *mode where
* we found pay dirt, to save us the search on the next call.
*/
@@ -3203,7 +3203,7 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
struct zone *zone = page_zone(page);
/* zone lock should be held when this function is called */
- lockdep_assert_held(&zone->lock);
+ lockdep_assert_held(&zone->_lock);
/* Return isolated page to tail of freelist. */
__free_one_page(page, page_to_pfn(page), zone, order, mt,
@@ -7086,7 +7086,7 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
* pages. Because of this, we reserve the bigger range and
* once this is done free the pages we are not interested in.
*
- * We don't have to hold zone->lock here because the pages are
+ * We don't have to hold zone->_lock here because the pages are
* isolated thus they won't get removed from buddy.
*/
outer_start = find_large_buddy(start);
@@ -7655,7 +7655,7 @@ void accept_page(struct page *page)
return;
}
- /* Unlocks zone->lock */
+ /* Unlocks zone->_lock */
__accept_page(zone, &flags, page);
}
@@ -7672,7 +7672,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
return false;
}
- /* Unlocks zone->lock */
+ /* Unlocks zone->_lock */
__accept_page(zone, &flags, page);
return true;
@@ -7813,7 +7813,7 @@ struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned
/*
* Best effort allocation from percpu free list.
- * If it's empty attempt to spin_trylock zone->lock.
+ * If it's empty attempt to spin_trylock zone->_lock.
*/
page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 56a272f38b66..78b58dae2015 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -212,7 +212,7 @@ static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
zone_unlock_irqrestore(zone, flags);
if (mode == PB_ISOLATE_MODE_MEM_OFFLINE) {
/*
- * printk() with zone->lock held will likely trigger a
+ * printk() with zone->_lock held will likely trigger a
* lockdep splat, so defer it here.
*/
dump_page(unmovable, "unmovable page");
@@ -553,7 +553,7 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
/*
* Test all pages in the range is free(means isolated) or not.
* all pages in [start_pfn...end_pfn) must be in the same zone.
- * zone->lock must be held before call this.
+ * zone->_lock must be held before call this.
*
* Returns the last tested pfn.
*/
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8178e0be557f..54a4ba63b14f 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -799,7 +799,7 @@ static void init_pages_in_zone(struct zone *zone)
continue;
/*
- * To avoid having to grab zone->lock, be a little
+ * To avoid having to grab zone->_lock, be a little
* careful when reading buddy page order. The only
* danger is that we skip too much and potentially miss
* some early allocated pages, which is better than
--
2.47.3
On Thu, 26 Feb 2026 18:26:21 +0000 Dmitry Ilvokhin <d@ilvokhin.com> wrote:

> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
>
> No functional change intended.
>
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>

Acked-by: SeongJae Park <sj@kernel.org>

Thanks,
SJ

[...]
Hi Dmitry,
kernel test robot noticed the following build errors:
[auto build test ERROR on linus/master]
[also build test ERROR on v7.0-rc1 next-20260226]
[cannot apply to akpm-mm/mm-everything rppt-memblock/for-next rppt-memblock/fixes]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Dmitry-Ilvokhin/mm-introduce-zone-lock-wrappers/20260227-022914
base: linus/master
patch link: https://lore.kernel.org/r/1221b8e7fa9f5694f3c4e411f01581b5aba9bc63.1772129168.git.d%40ilvokhin.com
patch subject: [PATCH v3 4/5] mm: rename zone->lock to zone->_lock
config: x86_64-defconfig (https://download.01.org/0day-ci/archive/20260227/202602270740.0RL1uwsV-lkp@intel.com/config)
compiler: gcc-14 (Debian 14.2.0-19) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260227/202602270740.0RL1uwsV-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602270740.0RL1uwsV-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from include/linux/sched.h:37,
from include/linux/percpu.h:12,
from arch/x86/include/asm/msr.h:16,
from arch/x86/include/asm/tsc.h:11,
from arch/x86/include/asm/timex.h:6,
from include/linux/timex.h:67,
from include/linux/time32.h:13,
from include/linux/time.h:60,
from include/linux/stat.h:19,
from include/linux/module.h:13,
from kernel/power/snapshot.c:14:
kernel/power/snapshot.c: In function 'mark_free_pages':
>> kernel/power/snapshot.c:1254:34: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
1254 | spin_lock_irqsave(&zone->lock, flags);
| ^~~~
include/linux/spinlock.h:244:48: note: in definition of macro 'raw_spin_lock_irqsave'
244 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
kernel/power/snapshot.c:1254:9: note: in expansion of macro 'spin_lock_irqsave'
1254 | spin_lock_irqsave(&zone->lock, flags);
| ^~~~~~~~~~~~~~~~~
kernel/power/snapshot.c:1287:39: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
1287 | spin_unlock_irqrestore(&zone->lock, flags);
| ^~~~
| _lock
vim +1254 kernel/power/snapshot.c
31a1b9d7fe768d Kefeng Wang 2023-05-16 1243
31a1b9d7fe768d Kefeng Wang 2023-05-16 1244 static void mark_free_pages(struct zone *zone)
31a1b9d7fe768d Kefeng Wang 2023-05-16 1245 {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1246 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1247 unsigned long flags;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1248 unsigned int order, t;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1249 struct page *page;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1250
31a1b9d7fe768d Kefeng Wang 2023-05-16 1251 if (zone_is_empty(zone))
31a1b9d7fe768d Kefeng Wang 2023-05-16 1252 return;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1253
31a1b9d7fe768d Kefeng Wang 2023-05-16 @1254 spin_lock_irqsave(&zone->lock, flags);
31a1b9d7fe768d Kefeng Wang 2023-05-16 1255
31a1b9d7fe768d Kefeng Wang 2023-05-16 1256 max_zone_pfn = zone_end_pfn(zone);
312eca8a14c5f5 David Woodhouse 2025-04-23 1257 for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1258 page = pfn_to_page(pfn);
31a1b9d7fe768d Kefeng Wang 2023-05-16 1259
31a1b9d7fe768d Kefeng Wang 2023-05-16 1260 if (!--page_count) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1261 touch_nmi_watchdog();
31a1b9d7fe768d Kefeng Wang 2023-05-16 1262 page_count = WD_PAGE_COUNT;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1263 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1264
31a1b9d7fe768d Kefeng Wang 2023-05-16 1265 if (page_zone(page) != zone)
31a1b9d7fe768d Kefeng Wang 2023-05-16 1266 continue;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1267
31a1b9d7fe768d Kefeng Wang 2023-05-16 1268 if (!swsusp_page_is_forbidden(page))
31a1b9d7fe768d Kefeng Wang 2023-05-16 1269 swsusp_unset_page_free(page);
31a1b9d7fe768d Kefeng Wang 2023-05-16 1270 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1271
31a1b9d7fe768d Kefeng Wang 2023-05-16 1272 for_each_migratetype_order(order, t) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1273 list_for_each_entry(page,
31a1b9d7fe768d Kefeng Wang 2023-05-16 1274 &zone->free_area[order].free_list[t], buddy_list) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1275 unsigned long i;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1276
31a1b9d7fe768d Kefeng Wang 2023-05-16 1277 pfn = page_to_pfn(page);
31a1b9d7fe768d Kefeng Wang 2023-05-16 1278 for (i = 0; i < (1UL << order); i++) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1279 if (!--page_count) {
31a1b9d7fe768d Kefeng Wang 2023-05-16 1280 touch_nmi_watchdog();
31a1b9d7fe768d Kefeng Wang 2023-05-16 1281 page_count = WD_PAGE_COUNT;
31a1b9d7fe768d Kefeng Wang 2023-05-16 1282 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1283 swsusp_set_page_free(pfn_to_page(pfn + i));
31a1b9d7fe768d Kefeng Wang 2023-05-16 1284 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1285 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1286 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1287 spin_unlock_irqrestore(&zone->lock, flags);
31a1b9d7fe768d Kefeng Wang 2023-05-16 1288 }
31a1b9d7fe768d Kefeng Wang 2023-05-16 1289
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi Dmitry,
kernel test robot noticed the following build errors:
[auto build test ERROR on linus/master]
[also build test ERROR on v7.0-rc1 next-20260226]
[cannot apply to akpm-mm/mm-everything rppt-memblock/for-next rppt-memblock/fixes]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Dmitry-Ilvokhin/mm-introduce-zone-lock-wrappers/20260227-022914
base: linus/master
patch link: https://lore.kernel.org/r/1221b8e7fa9f5694f3c4e411f01581b5aba9bc63.1772129168.git.d%40ilvokhin.com
patch subject: [PATCH v3 4/5] mm: rename zone->lock to zone->_lock
config: microblaze-randconfig-r073-20260227 (https://download.01.org/0day-ci/archive/20260227/202602270508.8MKXotxZ-lkp@intel.com/config)
compiler: microblaze-linux-gcc (GCC) 11.5.0
smatch version: v0.5.0-8994-gd50c5a4c
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260227/202602270508.8MKXotxZ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602270508.8MKXotxZ-lkp@intel.com/
All errors (new ones prefixed by >>):
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:8,
from mm/shuffle.c:4:
mm/shuffle.c: In function '__shuffle_zone':
>> mm/shuffle.c:88:31: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
88 | spin_lock_irqsave(&z->lock, flags);
| ^~~~
include/linux/spinlock.h:244:48: note: in definition of macro 'raw_spin_lock_irqsave'
244 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/shuffle.c:88:9: note: in expansion of macro 'spin_lock_irqsave'
88 | spin_lock_irqsave(&z->lock, flags);
| ^~~~~~~~~~~~~~~~~
mm/shuffle.c:141:52: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
141 | spin_unlock_irqrestore(&z->lock, flags);
| ^~~~
| _lock
In file included from include/linux/mmzone.h:8,
from include/linux/gfp.h:7,
from include/linux/mm.h:8,
from mm/shuffle.c:4:
mm/shuffle.c:143:47: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
143 | spin_lock_irqsave(&z->lock, flags);
| ^~~~
include/linux/spinlock.h:244:48: note: in definition of macro 'raw_spin_lock_irqsave'
244 | flags = _raw_spin_lock_irqsave(lock); \
| ^~~~
mm/shuffle.c:143:25: note: in expansion of macro 'spin_lock_irqsave'
143 | spin_lock_irqsave(&z->lock, flags);
| ^~~~~~~~~~~~~~~~~
mm/shuffle.c:146:36: error: 'struct zone' has no member named 'lock'; did you mean '_lock'?
146 | spin_unlock_irqrestore(&z->lock, flags);
| ^~~~
| _lock
vim +88 mm/shuffle.c
e900a918b0984e Dan Williams 2019-05-14 3
e900a918b0984e Dan Williams 2019-05-14 @4 #include <linux/mm.h>
e900a918b0984e Dan Williams 2019-05-14 5 #include <linux/init.h>
e900a918b0984e Dan Williams 2019-05-14 6 #include <linux/mmzone.h>
e900a918b0984e Dan Williams 2019-05-14 7 #include <linux/random.h>
e900a918b0984e Dan Williams 2019-05-14 8 #include <linux/moduleparam.h>
e900a918b0984e Dan Williams 2019-05-14 9 #include "internal.h"
e900a918b0984e Dan Williams 2019-05-14 10 #include "shuffle.h"
e900a918b0984e Dan Williams 2019-05-14 11
e900a918b0984e Dan Williams 2019-05-14 12 DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
e900a918b0984e Dan Williams 2019-05-14 13
e900a918b0984e Dan Williams 2019-05-14 14 static bool shuffle_param;
e900a918b0984e Dan Williams 2019-05-14 15
85a34107eba913 Liu Shixin 2022-09-09 16 static __meminit int shuffle_param_set(const char *val,
e900a918b0984e Dan Williams 2019-05-14 17 const struct kernel_param *kp)
e900a918b0984e Dan Williams 2019-05-14 18 {
85a34107eba913 Liu Shixin 2022-09-09 19 if (param_set_bool(val, kp))
85a34107eba913 Liu Shixin 2022-09-09 20 return -EINVAL;
85a34107eba913 Liu Shixin 2022-09-09 21 if (*(bool *)kp->arg)
839195352d8235 David Hildenbrand 2020-08-06 22 static_branch_enable(&page_alloc_shuffle_key);
e900a918b0984e Dan Williams 2019-05-14 23 return 0;
e900a918b0984e Dan Williams 2019-05-14 24 }
85a34107eba913 Liu Shixin 2022-09-09 25
85a34107eba913 Liu Shixin 2022-09-09 26 static const struct kernel_param_ops shuffle_param_ops = {
85a34107eba913 Liu Shixin 2022-09-09 27 .set = shuffle_param_set,
85a34107eba913 Liu Shixin 2022-09-09 28 .get = param_get_bool,
85a34107eba913 Liu Shixin 2022-09-09 29 };
85a34107eba913 Liu Shixin 2022-09-09 30 module_param_cb(shuffle, &shuffle_param_ops, &shuffle_param, 0400);
e900a918b0984e Dan Williams 2019-05-14 31
e900a918b0984e Dan Williams 2019-05-14 32 /*
e900a918b0984e Dan Williams 2019-05-14 33 * For two pages to be swapped in the shuffle, they must be free (on a
e900a918b0984e Dan Williams 2019-05-14 34 * 'free_area' lru), have the same order, and have the same migratetype.
e900a918b0984e Dan Williams 2019-05-14 35 */
4a93025cbe4a0b David Hildenbrand 2020-08-06 36 static struct page * __meminit shuffle_valid_page(struct zone *zone,
4a93025cbe4a0b David Hildenbrand 2020-08-06 37 unsigned long pfn, int order)
e900a918b0984e Dan Williams 2019-05-14 38 {
4a93025cbe4a0b David Hildenbrand 2020-08-06 39 struct page *page = pfn_to_online_page(pfn);
e900a918b0984e Dan Williams 2019-05-14 40
e900a918b0984e Dan Williams 2019-05-14 41 /*
e900a918b0984e Dan Williams 2019-05-14 42 * Given we're dealing with randomly selected pfns in a zone we
e900a918b0984e Dan Williams 2019-05-14 43 * need to ask questions like...
e900a918b0984e Dan Williams 2019-05-14 44 */
e900a918b0984e Dan Williams 2019-05-14 45
4a93025cbe4a0b David Hildenbrand 2020-08-06 46 /* ... is the page managed by the buddy? */
4a93025cbe4a0b David Hildenbrand 2020-08-06 47 if (!page)
e900a918b0984e Dan Williams 2019-05-14 48 return NULL;
e900a918b0984e Dan Williams 2019-05-14 49
4a93025cbe4a0b David Hildenbrand 2020-08-06 50 /* ... is the page assigned to the same zone? */
4a93025cbe4a0b David Hildenbrand 2020-08-06 51 if (page_zone(page) != zone)
e900a918b0984e Dan Williams 2019-05-14 52 return NULL;
e900a918b0984e Dan Williams 2019-05-14 53
e900a918b0984e Dan Williams 2019-05-14 54 /* ...is the page free and currently on a free_area list? */
e900a918b0984e Dan Williams 2019-05-14 55 if (!PageBuddy(page))
e900a918b0984e Dan Williams 2019-05-14 56 return NULL;
e900a918b0984e Dan Williams 2019-05-14 57
e900a918b0984e Dan Williams 2019-05-14 58 /*
e900a918b0984e Dan Williams 2019-05-14 59 * ...is the page on the same list as the page we will
e900a918b0984e Dan Williams 2019-05-14 60 * shuffle it with?
e900a918b0984e Dan Williams 2019-05-14 61 */
ab130f9108dcf2 Matthew Wilcox (Oracle 2020-10-15 62) if (buddy_order(page) != order)
e900a918b0984e Dan Williams 2019-05-14 63 return NULL;
e900a918b0984e Dan Williams 2019-05-14 64
e900a918b0984e Dan Williams 2019-05-14 65 return page;
e900a918b0984e Dan Williams 2019-05-14 66 }
e900a918b0984e Dan Williams 2019-05-14 67
e900a918b0984e Dan Williams 2019-05-14 68 /*
e900a918b0984e Dan Williams 2019-05-14 69 * Fisher-Yates shuffle the freelist which prescribes iterating through an
e900a918b0984e Dan Williams 2019-05-14 70 * array, pfns in this case, and randomly swapping each entry with another in
e900a918b0984e Dan Williams 2019-05-14 71 * the span, end_pfn - start_pfn.
e900a918b0984e Dan Williams 2019-05-14 72 *
e900a918b0984e Dan Williams 2019-05-14 73 * To keep the implementation simple it does not attempt to correct for sources
e900a918b0984e Dan Williams 2019-05-14 74 * of bias in the distribution, like modulo bias or pseudo-random number
e900a918b0984e Dan Williams 2019-05-14 75 * generator bias. I.e. the expectation is that this shuffling raises the bar
e900a918b0984e Dan Williams 2019-05-14 76 * for attacks that exploit the predictability of page allocations, but need not
e900a918b0984e Dan Williams 2019-05-14 77 * be a perfect shuffle.
e900a918b0984e Dan Williams 2019-05-14 78 */
e900a918b0984e Dan Williams 2019-05-14 79 #define SHUFFLE_RETRY 10
e900a918b0984e Dan Williams 2019-05-14 80 void __meminit __shuffle_zone(struct zone *z)
e900a918b0984e Dan Williams 2019-05-14 81 {
e900a918b0984e Dan Williams 2019-05-14 82 unsigned long i, flags;
e900a918b0984e Dan Williams 2019-05-14 83 unsigned long start_pfn = z->zone_start_pfn;
e900a918b0984e Dan Williams 2019-05-14 84 unsigned long end_pfn = zone_end_pfn(z);
e900a918b0984e Dan Williams 2019-05-14 85 const int order = SHUFFLE_ORDER;
e900a918b0984e Dan Williams 2019-05-14 86 const int order_pages = 1 << order;
e900a918b0984e Dan Williams 2019-05-14 87
e900a918b0984e Dan Williams 2019-05-14 @88 spin_lock_irqsave(&z->lock, flags);
e900a918b0984e Dan Williams 2019-05-14 89 start_pfn = ALIGN(start_pfn, order_pages);
e900a918b0984e Dan Williams 2019-05-14 90 for (i = start_pfn; i < end_pfn; i += order_pages) {
e900a918b0984e Dan Williams 2019-05-14 91 unsigned long j;
e900a918b0984e Dan Williams 2019-05-14 92 int migratetype, retry;
e900a918b0984e Dan Williams 2019-05-14 93 struct page *page_i, *page_j;
e900a918b0984e Dan Williams 2019-05-14 94
e900a918b0984e Dan Williams 2019-05-14 95 /*
e900a918b0984e Dan Williams 2019-05-14 96 * We expect page_i, in the sub-range of a zone being added
e900a918b0984e Dan Williams 2019-05-14 97 * (@start_pfn to @end_pfn), to more likely be valid compared to
e900a918b0984e Dan Williams 2019-05-14 98 * page_j randomly selected in the span @zone_start_pfn to
e900a918b0984e Dan Williams 2019-05-14 99 * @spanned_pages.
e900a918b0984e Dan Williams 2019-05-14 100 */
4a93025cbe4a0b David Hildenbrand 2020-08-06 101 page_i = shuffle_valid_page(z, i, order);
e900a918b0984e Dan Williams 2019-05-14 102 if (!page_i)
e900a918b0984e Dan Williams 2019-05-14 103 continue;
e900a918b0984e Dan Williams 2019-05-14 104
e900a918b0984e Dan Williams 2019-05-14 105 for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
e900a918b0984e Dan Williams 2019-05-14 106 /*
e900a918b0984e Dan Williams 2019-05-14 107 * Pick a random order aligned page in the zone span as
e900a918b0984e Dan Williams 2019-05-14 108 * a swap target. If the selected pfn is a hole, retry
e900a918b0984e Dan Williams 2019-05-14 109 * up to SHUFFLE_RETRY attempts find a random valid pfn
e900a918b0984e Dan Williams 2019-05-14 110 * in the zone.
e900a918b0984e Dan Williams 2019-05-14 111 */
e900a918b0984e Dan Williams 2019-05-14 112 j = z->zone_start_pfn +
e900a918b0984e Dan Williams 2019-05-14 113 ALIGN_DOWN(get_random_long() % z->spanned_pages,
e900a918b0984e Dan Williams 2019-05-14 114 order_pages);
4a93025cbe4a0b David Hildenbrand 2020-08-06 115 page_j = shuffle_valid_page(z, j, order);
e900a918b0984e Dan Williams 2019-05-14 116 if (page_j && page_j != page_i)
e900a918b0984e Dan Williams 2019-05-14 117 break;
e900a918b0984e Dan Williams 2019-05-14 118 }
e900a918b0984e Dan Williams 2019-05-14 119 if (retry >= SHUFFLE_RETRY) {
e900a918b0984e Dan Williams 2019-05-14 120 pr_debug("%s: failed to swap %#lx\n", __func__, i);
e900a918b0984e Dan Williams 2019-05-14 121 continue;
e900a918b0984e Dan Williams 2019-05-14 122 }
e900a918b0984e Dan Williams 2019-05-14 123
e900a918b0984e Dan Williams 2019-05-14 124 /*
e900a918b0984e Dan Williams 2019-05-14 125 * Each migratetype corresponds to its own list, make sure the
e900a918b0984e Dan Williams 2019-05-14 126 * types match otherwise we're moving pages to lists where they
e900a918b0984e Dan Williams 2019-05-14 127 * do not belong.
e900a918b0984e Dan Williams 2019-05-14 128 */
e900a918b0984e Dan Williams 2019-05-14 129 migratetype = get_pageblock_migratetype(page_i);
e900a918b0984e Dan Williams 2019-05-14 130 if (get_pageblock_migratetype(page_j) != migratetype) {
e900a918b0984e Dan Williams 2019-05-14 131 pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
e900a918b0984e Dan Williams 2019-05-14 132 continue;
e900a918b0984e Dan Williams 2019-05-14 133 }
e900a918b0984e Dan Williams 2019-05-14 134
e900a918b0984e Dan Williams 2019-05-14 135 list_swap(&page_i->lru, &page_j->lru);
e900a918b0984e Dan Williams 2019-05-14 136
e900a918b0984e Dan Williams 2019-05-14 137 pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);
e900a918b0984e Dan Williams 2019-05-14 138
e900a918b0984e Dan Williams 2019-05-14 139 /* take it easy on the zone lock */
e900a918b0984e Dan Williams 2019-05-14 140 if ((i % (100 * order_pages)) == 0) {
e900a918b0984e Dan Williams 2019-05-14 141 spin_unlock_irqrestore(&z->lock, flags);
e900a918b0984e Dan Williams 2019-05-14 142 cond_resched();
e900a918b0984e Dan Williams 2019-05-14 143 spin_lock_irqsave(&z->lock, flags);
e900a918b0984e Dan Williams 2019-05-14 144 }
e900a918b0984e Dan Williams 2019-05-14 145 }
e900a918b0984e Dan Williams 2019-05-14 146 spin_unlock_irqrestore(&z->lock, flags);
e900a918b0984e Dan Williams 2019-05-14 147 }
e900a918b0984e Dan Williams 2019-05-14 148
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On Fri, 27 Feb 2026 05:48:05 +0800 kernel test robot <lkp@intel.com> wrote:

> Hi Dmitry,
>
> kernel test robot noticed the following build errors:
>
> mm/shuffle.c: In function '__shuffle_zone':

yep, thanks. And kernel/power/snapshot.c. I've added fixups.
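
The fixups themselves are not shown in the thread. A minimal sketch of what
they would presumably look like, converting both reported call sites to the
wrappers from this series (illustrative hunks, not the actual fixups):

--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ static void mark_free_pages(struct zone *zone)
-	spin_lock_irqsave(&zone->lock, flags);
+	zone_lock_irqsave(zone, flags);
@@
-	spin_unlock_irqrestore(&zone->lock, flags);
+	zone_unlock_irqrestore(zone, flags);

--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ void __meminit __shuffle_zone(struct zone *z)
-	spin_lock_irqsave(&z->lock, flags);
+	zone_lock_irqsave(z, flags);
@@ /* take it easy on the zone lock */
-	spin_unlock_irqrestore(&z->lock, flags);
+	zone_unlock_irqrestore(z, flags);
 	cond_resched();
-	spin_lock_irqsave(&z->lock, flags);
+	zone_lock_irqsave(z, flags);
@@
-	spin_unlock_irqrestore(&z->lock, flags);
+	zone_unlock_irqrestore(z, flags);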
On Thu, Feb 26, 2026 at 06:26:21PM +0000, Dmitry Ilvokhin wrote:

> This intentionally breaks direct users of zone->lock at compile time so
> all call sites are converted to the zone lock wrappers. Without the
> rename, present and future out-of-tree code could continue using
> spin_lock(&zone->lock) and bypass the wrappers and tracing
> infrastructure.
>
> No functional change intended.
>
> Suggested-by: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Dmitry Ilvokhin <d@ilvokhin.com>

Acked-by: Shakeel Butt <shakeel.butt@linux.dev>