With 4k pages, a 32-bit nr_pages can span up to 16 TiB. While it is a
lot, there exist systems with terabytes of RAM. gup is also moving to
using long for nr_pages. Use unsigned long and make KHO future-proof.
Suggested-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
---
Changes in v2:
- New in v2.
include/linux/kexec_handover.h | 6 +++---
kernel/liveupdate/kexec_handover.c | 11 ++++++-----
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 5f7b9de97e8d..81814aa92370 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -45,15 +45,15 @@ bool is_kho_boot(void);
int kho_preserve_folio(struct folio *folio);
void kho_unpreserve_folio(struct folio *folio);
-int kho_preserve_pages(struct page *page, unsigned int nr_pages);
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_pages(struct page *page, unsigned long nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages);
int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
void *kho_alloc_preserve(size_t size);
void kho_unpreserve_free(void *mem);
void kho_restore_free(void *mem);
struct folio *kho_restore_folio(phys_addr_t phys);
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages);
void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
int kho_add_subtree(const char *name, void *fdt);
void kho_remove_subtree(void *fdt);
diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
index 9dc51fab604f..709484fbf9fd 100644
--- a/kernel/liveupdate/kexec_handover.c
+++ b/kernel/liveupdate/kexec_handover.c
@@ -222,7 +222,8 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
{
struct page *page = pfn_to_online_page(PHYS_PFN(phys));
- unsigned int nr_pages, ref_cnt;
+ unsigned long nr_pages;
+ unsigned int ref_cnt;
union kho_page_info info;
if (!page)
@@ -249,7 +250,7 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
* count of 1
*/
ref_cnt = is_folio ? 0 : 1;
- for (unsigned int i = 1; i < nr_pages; i++)
+ for (unsigned long i = 1; i < nr_pages; i++)
set_page_count(page + i, ref_cnt);
if (is_folio && info.order)
@@ -283,7 +284,7 @@ EXPORT_SYMBOL_GPL(kho_restore_folio);
*
* Return: 0 on success, error code on failure
*/
-struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
+struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
{
const unsigned long start_pfn = PHYS_PFN(phys);
const unsigned long end_pfn = start_pfn + nr_pages;
@@ -829,7 +830,7 @@ EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
*
* Return: 0 on success, error code on failure
*/
-int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+int kho_preserve_pages(struct page *page, unsigned long nr_pages)
{
struct kho_mem_track *track = &kho_out.track;
const unsigned long start_pfn = page_to_pfn(page);
@@ -873,7 +874,7 @@ EXPORT_SYMBOL_GPL(kho_preserve_pages);
* kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
* preserved blocks is not supported.
*/
-void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
+void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
{
struct kho_mem_track *track = &kho_out.track;
const unsigned long start_pfn = page_to_pfn(page);
--
2.52.0.457.g6b5491de43-goog
> @@ -222,7 +222,8 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
> static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
> {
> struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> - unsigned int nr_pages, ref_cnt;
> + unsigned long nr_pages;
> + unsigned int ref_cnt;
> union kho_page_info info;
>
> if (!page)
> @@ -249,7 +250,7 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
> * count of 1
> */
> ref_cnt = is_folio ? 0 : 1;
> - for (unsigned int i = 1; i < nr_pages; i++)
> + for (unsigned long i = 1; i < nr_pages; i++)
> set_page_count(page + i, ref_cnt);
>
> if (is_folio && info.order)
> @@ -283,7 +284,7 @@ EXPORT_SYMBOL_GPL(kho_restore_folio);
> *
> * Return: 0 on success, error code on failure
> */
> -struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
> +struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
> {
> const unsigned long start_pfn = PHYS_PFN(phys);
> const unsigned long end_pfn = start_pfn + nr_pages;
> @@ -829,7 +830,7 @@ EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
> *
> * Return: 0 on success, error code on failure
> */
> -int kho_preserve_pages(struct page *page, unsigned int nr_pages)
> +int kho_preserve_pages(struct page *page, unsigned long nr_pages)
> {
> struct kho_mem_track *track = &kho_out.track;
> const unsigned long start_pfn = page_to_pfn(page);
> @@ -873,7 +874,7 @@ EXPORT_SYMBOL_GPL(kho_preserve_pages);
> * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
> * preserved blocks is not supported.
> */
> -void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
> +void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
On Fri, Jan 16, 2026 at 11:22:14AM +0000, Pratyush Yadav wrote:
> With 4k pages, a 32-bit nr_pages can span up to 16 TiB. While it is a
> lot, there exist systems with terabytes of RAM. gup is also moving to
> using long for nr_pages. Use unsigned long and make KHO future-proof.
>
> Suggested-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
>
> Changes in v2:
> - New in v2.
>
> include/linux/kexec_handover.h | 6 +++---
> kernel/liveupdate/kexec_handover.c | 11 ++++++-----
> 2 files changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
> index 5f7b9de97e8d..81814aa92370 100644
> --- a/include/linux/kexec_handover.h
> +++ b/include/linux/kexec_handover.h
> @@ -45,15 +45,15 @@ bool is_kho_boot(void);
>
> int kho_preserve_folio(struct folio *folio);
> void kho_unpreserve_folio(struct folio *folio);
> -int kho_preserve_pages(struct page *page, unsigned int nr_pages);
> -void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
> +int kho_preserve_pages(struct page *page, unsigned long nr_pages);
> +void kho_unpreserve_pages(struct page *page, unsigned long nr_pages);
> int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
> void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
> void *kho_alloc_preserve(size_t size);
> void kho_unpreserve_free(void *mem);
> void kho_restore_free(void *mem);
> struct folio *kho_restore_folio(phys_addr_t phys);
> -struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
> +struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages);
> void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
> int kho_add_subtree(const char *name, void *fdt);
> void kho_remove_subtree(void *fdt);
> diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c
> index 9dc51fab604f..709484fbf9fd 100644
> --- a/kernel/liveupdate/kexec_handover.c
> +++ b/kernel/liveupdate/kexec_handover.c
> @@ -222,7 +222,8 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
> static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
> {
> struct page *page = pfn_to_online_page(PHYS_PFN(phys));
> - unsigned int nr_pages, ref_cnt;
> + unsigned long nr_pages;
> + unsigned int ref_cnt;
> union kho_page_info info;
>
> if (!page)
> @@ -249,7 +250,7 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
> * count of 1
> */
> ref_cnt = is_folio ? 0 : 1;
> - for (unsigned int i = 1; i < nr_pages; i++)
> + for (unsigned long i = 1; i < nr_pages; i++)
> set_page_count(page + i, ref_cnt);
>
> if (is_folio && info.order)
> @@ -283,7 +284,7 @@ EXPORT_SYMBOL_GPL(kho_restore_folio);
> *
> * Return: 0 on success, error code on failure
> */
> -struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
> +struct page *kho_restore_pages(phys_addr_t phys, unsigned long nr_pages)
> {
> const unsigned long start_pfn = PHYS_PFN(phys);
> const unsigned long end_pfn = start_pfn + nr_pages;
> @@ -829,7 +830,7 @@ EXPORT_SYMBOL_GPL(kho_unpreserve_folio);
> *
> * Return: 0 on success, error code on failure
> */
> -int kho_preserve_pages(struct page *page, unsigned int nr_pages)
> +int kho_preserve_pages(struct page *page, unsigned long nr_pages)
> {
> struct kho_mem_track *track = &kho_out.track;
> const unsigned long start_pfn = page_to_pfn(page);
> @@ -873,7 +874,7 @@ EXPORT_SYMBOL_GPL(kho_preserve_pages);
> * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger
> * preserved blocks is not supported.
> */
> -void kho_unpreserve_pages(struct page *page, unsigned int nr_pages)
> +void kho_unpreserve_pages(struct page *page, unsigned long nr_pages)
> {
> struct kho_mem_track *track = &kho_out.track;
> const unsigned long start_pfn = page_to_pfn(page);
> --
> 2.52.0.457.g6b5491de43-goog
>
--
Sincerely yours,
Mike.
On Fri, 16 Jan 2026 11:22:14 +0000 Pratyush Yadav <pratyush@kernel.org> wrote:

> With 4k pages, a 32-bit nr_pages can span up to 16 TiB. While it is a
> lot, there exist systems with terabytes of RAM. gup is also moving to
> using long for nr_pages. Use unsigned long and make KHO future-proof.

We can expect people to be using LTS kernel five years from now, perhaps
much longer. Machines will be bigger then!

IOW, shouldn't we backport this?
On Fri, Jan 16, 2026 at 02:26:35PM -0800, Andrew Morton wrote:
> On Fri, 16 Jan 2026 11:22:14 +0000 Pratyush Yadav <pratyush@kernel.org> wrote:
>
> > With 4k pages, a 32-bit nr_pages can span up to 16 TiB. While it is a
> > lot, there exist systems with terabytes of RAM. gup is also moving to
> > using long for nr_pages. Use unsigned long and make KHO future-proof.
>
> We can expect people to be using LTS kernel five years from now,
> perhaps much longer. Machines will be bigger then!
>
> IOW, shouldn't we backport this?

The latest LTS is 6.12, which still does not have KHO. I don't think it
makes sense to backport this to 6.18.

--
Sincerely yours,
Mike.
© 2016 - 2026 Red Hat, Inc.