When restoring a folio using kho_restore_folio(), no sanity checks are
done to make sure the folio actually came from a kexec handover. The
caller is trusted to pass in the right address. If the caller has a bug
and passes in a wrong address, an in-use folio might be "restored" and
returned, causing all sorts of memory corruption.
Harden the folio restore logic by stashing a magic number in
page->private along with the folio order. If the magic number does not
match, the folio won't be touched. page->private is an unsigned long.
The union kho_page_info splits it into two parts, one holding the
order and the other holding the magic number.
Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
---
kernel/kexec_handover.c | 29 ++++++++++++++++++++++++-----
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index ecd1ac210dbd7..68eb3c28abe41 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -32,6 +32,22 @@
 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
 #define PROP_SUB_FDT "fdt"
 
+#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
+
+/*
+ * KHO uses page->private, which is an unsigned long, to store page metadata.
+ * Use it to store both the magic and the order.
+ */
+union kho_page_info {
+	unsigned long page_private;
+	struct {
+		unsigned int order;
+		unsigned int magic;
+	};
+};
+
+static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
+
 static bool kho_enable __ro_after_init;
 
 bool kho_is_enabled(void)
@@ -210,16 +226,16 @@ static void kho_restore_page(struct page *page, unsigned int order)
 struct folio *kho_restore_folio(phys_addr_t phys)
 {
 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
-	unsigned long order;
+	union kho_page_info info;
 
 	if (!page)
 		return NULL;
 
-	order = page->private;
-	if (order > MAX_PAGE_ORDER)
+	info.page_private = page->private;
+	if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)
 		return NULL;
 
-	kho_restore_page(page, order);
+	kho_restore_page(page, info.order);
 	return page_folio(page);
 }
 EXPORT_SYMBOL_GPL(kho_restore_folio);
@@ -341,10 +357,13 @@ static void __init deserialize_bitmap(unsigned int order,
 		phys_addr_t phys =
 			elm->phys_start + (bit << (order + PAGE_SHIFT));
 		struct page *page = phys_to_page(phys);
+		union kho_page_info info;
 
 		memblock_reserve(phys, sz);
 		memblock_reserved_mark_noinit(phys, sz);
 
-		page->private = order;
+		info.magic = KHO_PAGE_MAGIC;
+		info.order = order;
+		page->private = info.page_private;
 	}
 }
base-commit: b320789d6883cc00ac78ce83bccbfe7ed58afcf0
--
2.47.3
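[Editor's note: for readers skimming the thread, here is a minimal userspace sketch of the encoding the patch introduces. The union mirrors the one in the diff; struct page is out of reach in userspace, so page->private is modeled as a bare unsigned long, and the order value 3 is just an example. The printed constant assumes a little-endian 64-bit build, matching the architectures KHO currently supports.]

#include <assert.h>
#include <stdio.h>

#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */

/*
 * Same layout as the patch: on a little-endian 64-bit build, the low
 * 32 bits of page->private carry the order and the high 32 bits the
 * magic.
 */
union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

/*
 * The patch asserts this against struct page's private field; in this
 * standalone sketch we can only check against unsigned long itself.
 */
_Static_assert(sizeof(union kho_page_info) == sizeof(unsigned long),
	       "needs a 64-bit unsigned long");

int main(void)
{
	/* Preserve side: what deserialize_bitmap() writes. */
	union kho_page_info info = { .magic = KHO_PAGE_MAGIC, .order = 3 };
	unsigned long private = info.page_private;

	/* Restore side: what kho_restore_folio() reads back and checks. */
	union kho_page_info check = { .page_private = private };
	assert(check.magic == KHO_PAGE_MAGIC);
	assert(check.order == 3);

	printf("page->private = 0x%lx\n", private); /* 0x4b484f5000000003 */
	return 0;
}

On a 32-bit build the _Static_assert fails, which lines up with the 64-bit-only discussion below.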
On Wed, Sep 10, 2025 at 05:34:40PM +0200, Pratyush Yadav wrote:
> +#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
> +
> +/*
> + * KHO uses page->private, which is an unsigned long, to store page metadata.
> + * Use it to store both the magic and the order.
> + */
> +union kho_page_info {
> +	unsigned long page_private;
> +	struct {
> +		unsigned int order;
> +		unsigned int magic;
> +	};

KHO is only supported on 64-bit?

> @@ -210,16 +226,16 @@ static void kho_restore_page(struct page *page, unsigned int order)
>  struct folio *kho_restore_folio(phys_addr_t phys)
>  {
>  	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> -	unsigned long order;
> +	union kho_page_info info;
>
>  	if (!page)
>  		return NULL;
>
> -	order = page->private;
> -	if (order > MAX_PAGE_ORDER)
> +	info.page_private = page->private;
> +	if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)
>  		return NULL;
>
> -	kho_restore_page(page, order);
> +	kho_restore_page(page, info.order);
>  	return page_folio(page);

This all looks very confused. Before your patch as well as after it.
I don't see anything in the current KHO code that requires the
phys_addr_t to be order-aligned.
On Wed, Sep 10 2025, Matthew Wilcox wrote:

> On Wed, Sep 10, 2025 at 05:34:40PM +0200, Pratyush Yadav wrote:
>> +#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
>> +
>> +/*
>> + * KHO uses page->private, which is an unsigned long, to store page metadata.
>> + * Use it to store both the magic and the order.
>> + */
>> +union kho_page_info {
>> +	unsigned long page_private;
>> +	struct {
>> +		unsigned int order;
>> +		unsigned int magic;
>> +	};
>
> KHO is only supported on 64-bit?

Yes. Currently only x86_64 and ARM64. It is mainly for hypervisor live
update so there isn't much reason to support it on 32-bit platforms.

>
>> @@ -210,16 +226,16 @@ static void kho_restore_page(struct page *page, unsigned int order)
>>  struct folio *kho_restore_folio(phys_addr_t phys)
>>  {
>>  	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
>> -	unsigned long order;
>> +	union kho_page_info info;
>>
>>  	if (!page)
>>  		return NULL;
>>
>> -	order = page->private;
>> -	if (order > MAX_PAGE_ORDER)
>> +	info.page_private = page->private;
>> +	if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)
>>  		return NULL;
>>
>> -	kho_restore_page(page, order);
>> +	kho_restore_page(page, info.order);
>>  	return page_folio(page);
>
> This all looks very confused. Before your patch as well as after it.
> I don't see anything in the current KHO code that requires the
> phys_addr_t to be order-aligned.

Right, good point. I can send that as a follow up patch. But I think
this patch stands on its own without that fix too.

-- 
Regards,
Pratyush Yadav
On Wed, Sep 10, 2025 at 05:52:04PM +0200, Pratyush Yadav wrote:
> On Wed, Sep 10 2025, Matthew Wilcox wrote:
>
> > On Wed, Sep 10, 2025 at 05:34:40PM +0200, Pratyush Yadav wrote:
> >> +#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
> >> +
> >> +/*
> >> + * KHO uses page->private, which is an unsigned long, to store page metadata.
> >> + * Use it to store both the magic and the order.
> >> + */
> >> +union kho_page_info {
> >> +	unsigned long page_private;
> >> +	struct {
> >> +		unsigned int order;
> >> +		unsigned int magic;
> >> +	};
> >
> > KHO is only supported on 64-bit?
>
> Yes. Currently only x86_64 and ARM64. It is mainly for hypervisor live
> update so there isn't much reason to support it on 32-bit platforms.

Presumably this will eventually change to use some special coding on
the memdesc pointer?

> >> @@ -210,16 +226,16 @@ static void kho_restore_page(struct page *page, unsigned int order)
> >>  struct folio *kho_restore_folio(phys_addr_t phys)
> >>  {
> >>  	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> >> -	unsigned long order;
> >> +	union kho_page_info info;
> >>
> >>  	if (!page)
> >>  		return NULL;
> >>
> >> -	order = page->private;
> >> -	if (order > MAX_PAGE_ORDER)
> >> +	info.page_private = page->private;
> >> +	if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)

All the impossible checks should be WARN_ON()

> >>  		return NULL;
> >>
> >> -	kho_restore_page(page, order);
> >> +	kho_restore_page(page, info.order);
> >>  	return page_folio(page);
> >
> > This all looks very confused. Before your patch as well as after it.
> > I don't see anything in the current KHO code that requires the
> > phys_addr_t to be order-aligned.
>
> Right, good point. I can send that as a follow up patch. But I think
> this patch stands on its own without that fix too.

Maybe it is worth adding some KHO_DEBUG kconfig to protect some of
these extra checks? phys should be pfn_valid, phys should be aligned,
the page should be in the right state, order should be valid, etc. All
worth checking.

Jason
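[Editor's note: pulling the review comments together, below is a hypothetical shape for kho_restore_folio() with Jason's WARN_ON() suggestion applied and a check for Willy's alignment observation folded in, as it would sit in kernel/kexec_handover.c next to the existing helpers. This is a sketch of a possible follow-up, not code from the posted patch; WARN_ON(), IS_ALIGNED(), and PAGE_SIZE are the existing kernel macros, and the union is the one the patch introduces.]

/*
 * Hypothetical hardened version of kho_restore_folio(), sketching the
 * review feedback; not part of the posted patch.
 */
struct folio *kho_restore_folio(phys_addr_t phys)
{
	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
	union kho_page_info info;

	if (!page)
		return NULL;

	info.page_private = page->private;
	/*
	 * A bad magic or order means a caller bug: warn loudly instead
	 * of failing silently.
	 */
	if (WARN_ON(info.magic != KHO_PAGE_MAGIC ||
		    info.order > MAX_PAGE_ORDER))
		return NULL;

	/*
	 * Willy's point: nothing currently enforces that phys is
	 * aligned to the preserved folio's size, so check it here.
	 */
	if (WARN_ON(!IS_ALIGNED(phys, PAGE_SIZE << info.order)))
		return NULL;

	kho_restore_page(page, info.order);
	return page_folio(page);
}

A KHO_DEBUG Kconfig option, as Jason suggests, could gate the costlier checks (page state, validity of every constituent pfn) so that production builds keep only the cheap magic/order test.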