From: Shengming Hu <hu.shengming@zte.com.cn>
From: Shengming Hu <hu.shengming@zte.com.cn>
memblock_free_pages() currently takes both a struct page * and the
corresponding PFN. The page pointer is always derived from the PFN at
call sites (pfn_to_page(pfn)), making the parameter redundant and also
allowing accidental mismatches between the two arguments.
Simplify the interface by removing the struct page * argument and
deriving the page locally from the PFN, after the deferred struct page
initialization check. This keeps the behavior unchanged while making
the helper harder to misuse.
Signed-off-by: Shengming Hu <hu.shengming@zte.com.cn>
---
mm/internal.h | 3 +--
mm/memblock.c | 4 ++--
mm/mm_init.c | 7 +++++--
tools/testing/memblock/internal.h | 3 +--
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index e430da900..5f93ee145 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -742,8 +742,7 @@ static inline void clear_zone_contiguous(struct zone *zone)
extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
int mt);
-extern void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order);
+extern void memblock_free_pages(unsigned long pfn, unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order,
enum meminit_context context);
diff --git a/mm/memblock.c b/mm/memblock.c
index 905d06b16..6e11f81c4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1771,7 +1771,7 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
end = PFN_DOWN(base + size);
for (; cursor < end; cursor++) {
- memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+ memblock_free_pages(cursor, 0);
totalram_pages_inc();
}
}
@@ -2216,7 +2216,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
while (start + (1UL << order) > end)
order--;
- memblock_free_pages(pfn_to_page(start), start, order);
+ memblock_free_pages(start, order);
start += (1UL << order);
}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index fc2a6f1e5..8e95d65cf 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2480,9 +2480,10 @@ void *__init alloc_large_system_hash(const char *tablename,
return table;
}
-void __init memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+void __init memblock_free_pages(unsigned long pfn, unsigned int order)
{
+ struct page *page;
+
if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
int nid = early_pfn_to_nid(pfn);
@@ -2490,6 +2491,8 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
return;
}
+ page = pfn_to_page(pfn);
+
if (!kmsan_memblock_free_pages(page, order)) {
/* KMSAN will take care of these pages. */
return;
diff --git a/tools/testing/memblock/internal.h b/tools/testing/memblock/internal.h
index 0ab4b53bb..009b97bbd 100644
--- a/tools/testing/memblock/internal.h
+++ b/tools/testing/memblock/internal.h
@@ -15,8 +15,7 @@ bool mirrored_kernelcore = false;
struct page {};
-void memblock_free_pages(struct page *page, unsigned long pfn,
- unsigned int order)
+void memblock_free_pages(unsigned long pfn, unsigned int order)
{
}
--
2.25.1
On Sun, Dec 28, 2025 at 07:38:50PM +0800, shengminghu512 wrote:
> From: Shengming Hu <hu.shengming@zte.com.cn>
>
> memblock_free_pages() currently takes both a struct page * and the
> corresponding PFN. The page pointer is always derived from the PFN at
> call sites (pfn_to_page(pfn)), making the parameter redundant and also
> allowing accidental mismatches between the two arguments.
>
> Simplify the interface by removing the struct page * argument and
> deriving the page locally from the PFN, after the deferred struct page
> initialization check. This keeps the behavior unchanged while making
> the helper harder to misuse.
>
> Signed-off-by: Shengming Hu <hu.shengming@zte.com.cn>
> ---
> mm/internal.h | 3 +--
> mm/memblock.c | 4 ++--
> mm/mm_init.c | 7 +++++--
> tools/testing/memblock/internal.h | 3 +--
> 4 files changed, 9 insertions(+), 8 deletions(-)
>
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -2480,9 +2480,10 @@ void *__init alloc_large_system_hash(const char *tablename,
> return table;
> }
>
> -void __init memblock_free_pages(struct page *page, unsigned long pfn,
> - unsigned int order)
> +void __init memblock_free_pages(unsigned long pfn, unsigned int order)
> {
> + struct page *page;
The `page` variable can be initialized at its declaration here; the compiler
should figure out how to avoid any extra instructions on the early-return path.
> +
> if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) {
> int nid = early_pfn_to_nid(pfn);
>
> @@ -2490,6 +2491,8 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
> return;
> }
>
> + page = pfn_to_page(pfn);
> +
> if (!kmsan_memblock_free_pages(page, order)) {
> /* KMSAN will take care of these pages. */
> return;
--
Sincerely yours,
Mike.
© 2016 - 2026 Red Hat, Inc.