From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
When contiguous ranges of order-0 pages are restored, kho_restore_page()
calls prep_compound_page() with the first page in the range and the
range's order as parameters, and kho_restore_pages() then calls
split_page() to make sure all pages in the range are order-0.

However, split_page() is not intended to split compound pages, and with
CONFIG_DEBUG_VM enabled it triggers a VM_BUG_ON_PAGE().
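
For reference, split_page() starts with roughly the following sanity
checks (paraphrased from mm/page_alloc.c, not quoted verbatim):

	void split_page(struct page *page, unsigned int order)
	{
		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!page_count(page), page);
		...
	}

so a page that has been through prep_compound_page() trips the first
assertion as soon as kho_restore_pages() tries to split it.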

Update kho_restore_page() so that it only calls prep_compound_page() when
it restores a folio, and make sure it sets the page count correctly for
both large folios and ranges of order-0 pages.

Reported-by: Pratyush Yadav <pratyush@kernel.org>
Fixes: a667300bd53f ("kho: add support for preserving vmalloc allocations")
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
kernel/liveupdate/kexec_handover.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
index e64ee87fa62a..61d17ed1f423 100644
--- a/kernel/liveupdate/kexec_handover.c
+++ b/kernel/liveupdate/kexec_handover.c
@@ -219,11 +219,11 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
return 0;
}
-static struct page *kho_restore_page(phys_addr_t phys)
+static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
struct page *page = pfn_to_online_page(PHYS_PFN(phys));
+ unsigned int nr_pages, ref_cnt;
union kho_page_info info;
- unsigned int nr_pages;
if (!page)
return NULL;
@@ -243,11 +243,16 @@ static struct page *kho_restore_page(phys_addr_t phys)
/* Head page gets refcount of 1. */
set_page_count(page, 1);
- /* For higher order folios, tail pages get a page count of zero. */
+ /*
+ * For higher order folios, tail pages get a page count of zero.
+ * For physically contiguous order-0 pages, every page gets a page
+ * count of 1.
+ */
+ ref_cnt = is_folio ? 0 : 1;
for (unsigned int i = 1; i < nr_pages; i++)
- set_page_count(page + i, 0);
+ set_page_count(page + i, ref_cnt);
- if (info.order > 0)
+ if (is_folio && info.order)
prep_compound_page(page, info.order);
adjust_managed_page_count(page, nr_pages);
@@ -262,7 +267,7 @@ static struct page *kho_restore_page(phys_addr_t phys)
*/
struct folio *kho_restore_folio(phys_addr_t phys)
{
- struct page *page = kho_restore_page(phys);
+ struct page *page = kho_restore_page(phys, true);
return page ? page_folio(page) : NULL;
}
@@ -287,11 +292,10 @@ struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
while (pfn < end_pfn) {
const unsigned int order =
min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
- struct page *page = kho_restore_page(PFN_PHYS(pfn));
+ struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
if (!page)
return NULL;
- split_page(page, order);
pfn += 1 << order;
}
--
2.50.1
On Tue, Nov 25 2025, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> When contiguous ranges of order-0 pages are restored, kho_restore_page()
> calls prep_compound_page() with the first page in the range and the
> range's order as parameters, and kho_restore_pages() then calls
> split_page() to make sure all pages in the range are order-0.
>
> However, split_page() is not intended to split compound pages, and with
> CONFIG_DEBUG_VM enabled it triggers a VM_BUG_ON_PAGE().
>
> Update kho_restore_page() so that it only calls prep_compound_page() when
> it restores a folio, and make sure it sets the page count correctly for
> both large folios and ranges of order-0 pages.
>
> Reported-by: Pratyush Yadav <pratyush@kernel.org>
> Fixes: a667300bd53f ("kho: add support for preserving vmalloc allocations")
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
> kernel/liveupdate/kexec_handover.c | 20 ++++++++++++--------
> 1 file changed, 12 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
> index e64ee87fa62a..61d17ed1f423 100644
> --- a/kernel/liveupdate/kexec_handover.c
> +++ b/kernel/liveupdate/kexec_handover.c
> @@ -219,11 +219,11 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
> return 0;
> }
>
> -static struct page *kho_restore_page(phys_addr_t phys)
> +static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
> {
> struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> + unsigned int nr_pages, ref_cnt;
> union kho_page_info info;
> - unsigned int nr_pages;
>
> if (!page)
> return NULL;
> @@ -243,11 +243,16 @@ static struct page *kho_restore_page(phys_addr_t phys)
> /* Head page gets refcount of 1. */
> set_page_count(page, 1);
>
> - /* For higher order folios, tail pages get a page count of zero. */
> + /*
> + * For higher order folios, tail pages get a page count of zero.
> + * For physically contiguous order-0 pages, every page gets a page
> + * count of 1.
> + */
> + ref_cnt = is_folio ? 0 : 1;
> for (unsigned int i = 1; i < nr_pages; i++)
> - set_page_count(page + i, 0);
> + set_page_count(page + i, ref_cnt);
>
> - if (info.order > 0)
> + if (is_folio && info.order)
This is getting a bit difficult to parse. Let's split folio and page
initialization into separate helpers:
/* Initialize order-0 KHO pages */
static void kho_init_page(struct page *page, unsigned int nr_pages)
{
for (unsigned int i = 0; i < nr_pages; i++)
set_page_count(page + i, 1);
}
static void kho_init_folio(struct page *page, unsigned int order)
{
unsigned int nr_pages = (1 << order);
/* Head page gets refcount of 1. */
set_page_count(page, 1);
/* For higher order folios, tail pages get a page count of zero. */
for (unsigned int i = 1; i < nr_pages; i++)
set_page_count(page + i, 0);
if (order > 0)
prep_compound_page(page, order);
}
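
With these helpers kho_restore_page() would just pick the right one based
on is_folio. A rough sketch, reusing the page, info and nr_pages locals
from the patch above (untested):

	nr_pages = (1 << info.order);

	if (is_folio)
		kho_init_folio(page, info.order);
	else
		kho_init_page(page, nr_pages);

	adjust_managed_page_count(page, nr_pages);

That keeps the folio-vs-plain-pages decision in one place instead of
spreading it over the ref_cnt computation and the prep_compound_page()
condition.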
> prep_compound_page(page, info.order);
>
> adjust_managed_page_count(page, nr_pages);
> @@ -262,7 +267,7 @@ static struct page *kho_restore_page(phys_addr_t phys)
> */
> struct folio *kho_restore_folio(phys_addr_t phys)
> {
> - struct page *page = kho_restore_page(phys);
> + struct page *page = kho_restore_page(phys, true);
>
> return page ? page_folio(page) : NULL;
> }
> @@ -287,11 +292,10 @@ struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
> while (pfn < end_pfn) {
> const unsigned int order =
> min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
> - struct page *page = kho_restore_page(PFN_PHYS(pfn));
> + struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
>
> if (!page)
> return NULL;
> - split_page(page, order);
> pfn += 1 << order;
> }
--
Regards,
Pratyush Yadav
Hi Pratyush,
On Tue, Nov 25, 2025 at 02:45:59PM +0100, Pratyush Yadav wrote:
> On Tue, Nov 25 2025, Mike Rapoport wrote:
...
> > @@ -243,11 +243,16 @@ static struct page *kho_restore_page(phys_addr_t phys)
> > /* Head page gets refcount of 1. */
> > set_page_count(page, 1);
> >
> > - /* For higher order folios, tail pages get a page count of zero. */
> > + /*
> > + * For higher order folios, tail pages get a page count of zero.
> > + * For physically contiguous order-0 pages, every page gets a page
> > + * count of 1.
> > + */
> > + ref_cnt = is_folio ? 0 : 1;
> > for (unsigned int i = 1; i < nr_pages; i++)
> > - set_page_count(page + i, 0);
> > + set_page_count(page + i, ref_cnt);
> >
> > - if (info.order > 0)
> > + if (is_folio && info.order)
>
> This is getting a bit difficult to parse. Let's split folio and page
> initialization into separate helpers:
Sorry, I missed this earlier, and the patches are now in akpm's -stable
branch.
Let's postpone these changes to the next cycle, maybe along with support
for deferred initialization of struct page.
> /* Initialize order-0 KHO pages */
> static void kho_init_page(struct page *page, unsigned int nr_pages)
> {
> for (unsigned int i = 0; i < nr_pages; i++)
> set_page_count(page + i, 1);
> }
>
> static void kho_init_folio(struct page *page, unsigned int order)
> {
> unsigned int nr_pages = (1 << order);
>
> /* Head page gets refcount of 1. */
> set_page_count(page, 1);
>
> /* For higher order folios, tail pages get a page count of zero. */
> for (unsigned int i = 1; i < nr_pages; i++)
> set_page_count(page + i, 0);
>
> if (order > 0)
> prep_compound_page(page, order);
> }
--
Sincerely yours,
Mike.
On Mon, Dec 01 2025, Mike Rapoport wrote:

> Hi Pratyush,
>
> On Tue, Nov 25, 2025 at 02:45:59PM +0100, Pratyush Yadav wrote:
>> On Tue, Nov 25 2025, Mike Rapoport wrote:
>
> ...
>
>> This is getting a bit difficult to parse. Let's split folio and page
>> initialization into separate helpers:
>
> Sorry, I missed this earlier, and the patches are now in akpm's -stable
> branch.
> Let's postpone these changes to the next cycle, maybe along with support
> for deferred initialization of struct page.

Sure, no problem.

[...]

--
Regards,
Pratyush Yadav