From: Mike Rapoport
To: Andrew Morton
Cc: Alexander Graf, Baoquan He, Changyuan Lyu, Chris Li, Jason Gunthorpe,
	Mike Rapoport, Pasha Tatashin, Pratyush Yadav,
	kexec@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 3/4] kho: add support for preserving vmalloc allocations
Date: Wed, 17 Sep 2025 20:40:32 +0300
Message-ID: <20250917174033.3810435-4-rppt@kernel.org>
In-Reply-To: <20250917174033.3810435-1-rppt@kernel.org>
References: <20250917174033.3810435-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

A vmalloc allocation is preserved using a binary structure similar to the
global KHO memory tracker: a linked list of pages, where each page is an
array of physical addresses of the pages mapped in the vmalloc area.

kho_preserve_vmalloc() hands the physical address of the head page of that
list back to the caller in a struct kho_vmalloc. This structure is then
passed to kho_restore_vmalloc() to recreate the mapping in the vmalloc
address space of the new kernel and to populate it with the preserved
pages.
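For illustration, a minimal caller-side sketch (not part of this patch).
The function and variable names below are made up, and it assumes the
struct kho_vmalloc descriptor itself is handed over to the new kernel by
other means, e.g. inside a preserved page referenced from the KHO FDT:

	/* Hypothetical example: preserve a vmalloc() buffer across kexec */
	static struct kho_vmalloc buf_preservation;

	static int example_preserve(void *vbuf)
	{
		/* vbuf must point into an area returned by vmalloc() */
		return kho_preserve_vmalloc(vbuf, &buf_preservation);
	}

	/* in the new kernel, once the descriptor has been retrieved: */
	static void *example_restore(const struct kho_vmalloc *descr)
	{
		/* recreates the mapping and returns its new vmalloc address */
		return kho_restore_vmalloc(descr);
	}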
Signed-off-by: Mike Rapoport (Microsoft)
Reviewed-by: Pratyush Yadav
---
 include/linux/kexec_handover.h |  21 +++
 kernel/kexec_handover.c        | 244 +++++++++++++++++++++++++++++++++
 2 files changed, 265 insertions(+)

diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index cc5c49b0612b..1cb515d8257a 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -39,12 +39,22 @@ struct page;
 
 struct kho_serialization;
 
+struct kho_vmalloc_chunk;
+struct kho_vmalloc {
+	DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+	unsigned int total_pages;
+	unsigned short flags;
+	unsigned short order;
+};
+
 #ifdef CONFIG_KEXEC_HANDOVER
 bool kho_is_enabled(void);
 
 int kho_preserve_folio(struct folio *folio);
 int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
 struct folio *kho_restore_folio(phys_addr_t phys);
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt);
 int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
 
@@ -71,11 +81,22 @@ static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
 	return -EOPNOTSUPP;
 }
 
+static inline int kho_preserve_vmalloc(void *ptr,
+				       struct kho_vmalloc *preservation)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct folio *kho_restore_folio(phys_addr_t phys)
 {
 	return NULL;
 }
 
+static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+	return NULL;
+}
+
 static inline int kho_add_subtree(struct kho_serialization *ser,
 				  const char *name, void *fdt)
 {
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 3ad59c5f9eaa..d670caf6d07f 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -18,6 +18,7 @@
 #include <linux/memblock.h>
 #include <linux/notifier.h>
 #include <linux/page-isolation.h>
+#include <linux/vmalloc.h>
 
 #include <asm/early_ioremap.h>
 
@@ -736,6 +737,249 @@ int kho_preserve_pages(struct page *page, unsigned int nr_pages)
 }
 EXPORT_SYMBOL_GPL(kho_preserve_pages);
 
+struct kho_vmalloc_hdr {
+	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+};
+
+#define KHO_VMALLOC_SIZE				 \
+	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+	 sizeof(phys_addr_t))
+
+struct kho_vmalloc_chunk {
+	struct kho_vmalloc_hdr hdr;
+	phys_addr_t phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+/* vmalloc flags KHO supports */
+#define KHO_VMALLOC_SUPPORTED_FLAGS	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
+
+/* KHO internal flags for vmalloc preservations */
+#define KHO_VMALLOC_ALLOC	0x0001
+#define KHO_VMALLOC_HUGE_VMAP	0x0002
+
+static unsigned short vmalloc_flags_to_kho(unsigned int vm_flags)
+{
+	unsigned short kho_flags = 0;
+
+	if (vm_flags & VM_ALLOC)
+		kho_flags |= KHO_VMALLOC_ALLOC;
+	if (vm_flags & VM_ALLOW_HUGE_VMAP)
+		kho_flags |= KHO_VMALLOC_HUGE_VMAP;
+
+	return kho_flags;
+}
+
+static unsigned int kho_flags_to_vmalloc(unsigned short kho_flags)
+{
+	unsigned int vm_flags = 0;
+
+	if (kho_flags & KHO_VMALLOC_ALLOC)
+		vm_flags |= VM_ALLOC;
+	if (kho_flags & KHO_VMALLOC_HUGE_VMAP)
+		vm_flags |= VM_ALLOW_HUGE_VMAP;
+
+	return vm_flags;
+}
+
+static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
+{
+	struct kho_vmalloc_chunk *chunk;
+	int err;
+
+	chunk = (struct kho_vmalloc_chunk *)get_zeroed_page(GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	err = kho_preserve_pages(virt_to_page(chunk), 1);
+	if (err)
+		goto err_free;
+	if (cur)
+		KHOSER_STORE_PTR(cur->hdr.next, chunk);
+	return chunk;
+
+err_free:
+	free_page((unsigned long)chunk);
+	return NULL;
+}
+
+static void kho_vmalloc_unpreserve_chunk(struct kho_vmalloc_chunk *chunk)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
+
+	__kho_unpreserve(track, pfn, pfn + 1);
+
+	/* a completely full chunk has no zero sentinel, so bound the scan */
+	for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
+		pfn = PHYS_PFN(chunk->phys[i]);
+		__kho_unpreserve(track, pfn, pfn + 1);
+	}
+}
+
+static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc)
+{
+	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first);
+
+	while (chunk) {
+		struct kho_vmalloc_chunk *tmp = chunk;
+
+		kho_vmalloc_unpreserve_chunk(chunk);
+
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		/* chunks come from get_zeroed_page(), not kmalloc() */
+		free_page((unsigned long)tmp);
+	}
+}
+
+/**
+ * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
+ * @ptr: pointer to the area in vmalloc address space
+ * @preservation: placeholder for preservation metadata
+ *
+ * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
+ * physical pages mapped at @ptr will be preserved and on successful return
+ * @preservation will hold the physical address of a structure that describes
+ * the preservation.
+ *
+ * NOTE: Memory allocated with the vmalloc_node() variants cannot be reliably
+ * restored on the same node.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation)
+{
+	struct kho_vmalloc_chunk *chunk;
+	struct vm_struct *vm = find_vm_area(ptr);
+	unsigned int order, flags, nr_contig_pages;
+	unsigned int idx = 0;
+	int err;
+
+	if (!vm)
+		return -EINVAL;
+
+	if (vm->flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
+		return -EOPNOTSUPP;
+
+	flags = vmalloc_flags_to_kho(vm->flags);
+	order = get_vm_area_page_order(vm);
+
+	chunk = new_vmalloc_chunk(NULL);
+	if (!chunk)
+		return -ENOMEM;
+	KHOSER_STORE_PTR(preservation->first, chunk);
+
+	nr_contig_pages = (1 << order);
+	for (int i = 0; i < vm->nr_pages; i += nr_contig_pages) {
+		phys_addr_t phys = page_to_phys(vm->pages[i]);
+
+		err = kho_preserve_pages(vm->pages[i], nr_contig_pages);
+		if (err)
+			goto err_free;
+
+		chunk->phys[idx++] = phys;
+		if (idx == ARRAY_SIZE(chunk->phys)) {
+			chunk = new_vmalloc_chunk(chunk);
+			if (!chunk) {
+				/* err still holds 0 from kho_preserve_pages() */
+				err = -ENOMEM;
+				goto err_free;
+			}
+			idx = 0;
+		}
+	}
+
+	preservation->total_pages = vm->nr_pages;
+	preservation->flags = flags;
+	preservation->order = order;
+
+	return 0;
+
+err_free:
+	kho_vmalloc_free_chunks(preservation);
+	return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
+
+/**
+ * kho_restore_vmalloc - recreates and populates an area in vmalloc address
+ * space from the preserved memory.
+ * @preservation: preservation metadata.
+ *
+ * Recreates an area in vmalloc address space and populates it with memory that
+ * was preserved using kho_preserve_vmalloc().
+ *
+ * Return: pointer to the area in the vmalloc address space, NULL on failure.
+ */
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+	struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first);
+	unsigned int align, order, shift, vm_flags;
+	unsigned int idx = 0, nr;
+	unsigned long addr, size;
+	struct vm_struct *area;
+	struct page **pages;
+	int err;
+
+	vm_flags = kho_flags_to_vmalloc(preservation->flags);
+	if (vm_flags & ~KHO_VMALLOC_SUPPORTED_FLAGS)
+		return NULL;
+
+	nr = preservation->total_pages;
+	pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+	order = preservation->order;
+	shift = PAGE_SHIFT + order;
+	align = 1 << shift;
+
+	while (chunk) {
+		struct page *page;
+
+		/* a completely full chunk has no zero sentinel, so bound the scan */
+		for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++) {
+			phys_addr_t phys = chunk->phys[i];
+
+			if (idx + (1 << order) > nr)
+				goto err_free_pages_array;
+
+			for (int j = 0; j < (1 << order); j++) {
+				page = phys_to_page(phys);
+				kho_restore_page(page, 0);
+				pages[idx++] = page;
+				phys += PAGE_SIZE;
+			}
+		}
+
+		page = virt_to_page(chunk);
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		kho_restore_page(page, 0);
+		__free_page(page);
+	}
+
+	if (idx != nr)
+		goto err_free_pages_array;
+
+	area = __get_vm_area_node(nr * PAGE_SIZE, align, shift, vm_flags,
+				  VMALLOC_START, VMALLOC_END, NUMA_NO_NODE,
+				  GFP_KERNEL, __builtin_return_address(0));
+	if (!area)
+		goto err_free_pages_array;
+
+	addr = (unsigned long)area->addr;
+	size = get_vm_area_size(area);
+	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
+	if (err)
+		goto err_free_vm_area;
+
+	area->pages = pages;
+	area->nr_pages = nr;
+
+	return area->addr;
+
+err_free_vm_area:
+	free_vm_area(area);
+err_free_pages_array:
+	kvfree(pages);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
+
 /* Handling for debug/kho/out */
 
 static struct dentry *debugfs_root;
-- 
2.50.1
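As a sanity check of the chunk layout above, assuming 4 KiB pages and an
8-byte phys_addr_t (true on most 64-bit configurations but not guaranteed),
KHO_VMALLOC_SIZE works out to (4096 - 8) / 8 = 511 entries per page-sized
chunk; the zero sentinel that terminates a partially filled chunk comes for
free because chunks are allocated with get_zeroed_page(). A hypothetical
helper, not part of the patch, that walks the chunk list the same way the
unpreserve and restore paths do:

	/* Illustrative only: count the addresses recorded in a preservation
	 * by walking its chunk list; each entry covers 1 << order pages, so
	 * the result times (1 << order) should equal total_pages. */
	static unsigned int kho_vmalloc_count_entries(const struct kho_vmalloc *p)
	{
		struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(p->first);
		unsigned int n = 0;

		while (chunk) {
			/* bounded: a full chunk has no zero sentinel */
			for (int i = 0; i < ARRAY_SIZE(chunk->phys) && chunk->phys[i]; i++)
				n++;
			chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
		}

		return n;
	}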