From nobody Tue Sep 9 16:20:04 2025
From: Mike Rapoport
To: Andrew Morton
Cc: Alexander Graf, Baoquan He, Changyuan Lyu, Chris Li, Jason Gunthorpe,
 Mike Rapoport, Pasha Tatashin, Pratyush Yadav, kexec@lists.infradead.org,
 linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 1/2] kho: add support for preserving vmalloc allocations
Date: Sun, 7 Sep 2025 10:00:18 +0300
Message-ID: <20250907070022.2177974-2-rppt@kernel.org>
In-Reply-To: <20250907070022.2177974-1-rppt@kernel.org>
References: <20250907070022.2177974-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

A vmalloc allocation is preserved using a binary structure similar to the
global KHO memory tracker: a linked list of pages where each page is an
array of physical addresses of the pages in the vmalloc area.

kho_preserve_vmalloc() hands out the physical address of the head page to
the caller. This address is used as the argument to kho_restore_vmalloc()
to restore the mapping in the vmalloc address space and populate it with
the preserved pages.
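
To illustrate the intended flow, here is a minimal, hypothetical caller (not
part of this patch). Only kho_preserve_vmalloc() and kho_restore_vmalloc()
come from this series; the function names and the way the handle is handed
over to the next kernel are made up for the example:

        #include <linux/kexec_handover.h>
        #include <linux/vmalloc.h>

        /* Before kexec: preserve a vmalloc'ed buffer and remember the handle. */
        static int example_preserve(void *buf, phys_addr_t *handle)
        {
                int err;

                /* @buf must be an address returned by vmalloc() and friends */
                err = kho_preserve_vmalloc(buf, handle);
                if (err)
                        return err;

                /*
                 * The caller is responsible for passing @handle to the next
                 * kernel, e.g. as a property in its KHO FDT subtree.
                 */
                return 0;
        }

        /* After kexec: recreate the vmalloc mapping from the preserved pages. */
        static void *example_restore(phys_addr_t handle)
        {
                return kho_restore_vmalloc(handle);
        }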

Signed-off-by: Mike Rapoport (Microsoft)
---
 include/linux/kexec_handover.h |  12 ++
 kernel/kexec_handover.c        | 200 +++++++++++++++++++++++++++++++++
 2 files changed, 212 insertions(+)

diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 348844cffb13..b7bf3bf11019 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -42,8 +42,10 @@ struct kho_serialization;
 bool kho_is_enabled(void);
 
 int kho_preserve_folio(struct folio *folio);
+int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation);
 int kho_preserve_phys(phys_addr_t phys, size_t size);
 struct folio *kho_restore_folio(phys_addr_t phys);
+void *kho_restore_vmalloc(phys_addr_t preservation);
 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt);
 int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
 
@@ -70,11 +72,21 @@ static inline int kho_preserve_phys(phys_addr_t phys, size_t size)
 	return -EOPNOTSUPP;
 }
 
+static inline int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct folio *kho_restore_folio(phys_addr_t phys)
 {
 	return NULL;
 }
 
+static inline void *kho_restore_vmalloc(phys_addr_t preservation)
+{
+	return NULL;
+}
+
 static inline int kho_add_subtree(struct kho_serialization *ser,
 				  const char *name, void *fdt)
 {
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index ecd1ac210dbd..c4560ff9b1fc 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -733,6 +734,205 @@ int kho_preserve_phys(phys_addr_t phys, size_t size)
 }
 EXPORT_SYMBOL_GPL(kho_preserve_phys);
 
+struct kho_vmalloc_chunk;
+
+struct kho_vmalloc_hdr {
+	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+	unsigned int total_pages;	/* only valid in the first chunk */
+	unsigned int flags;		/* only valid in the first chunk */
+	unsigned short order;		/* only valid in the first chunk */
+	unsigned short num_elms;
+};
+
+#define KHO_VMALLOC_SIZE \
+	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+	 sizeof(phys_addr_t))
+
+struct kho_vmalloc_chunk {
+	struct kho_vmalloc_hdr hdr;
+	phys_addr_t phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+#define KHO_VMALLOC_FLAGS_MASK	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
+
+static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
+{
+	struct kho_vmalloc_chunk *chunk;
+	int err;
+
+	chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	err = kho_preserve_phys(virt_to_phys(chunk), PAGE_SIZE);
+	if (err)
+		goto err_free;
+	if (cur)
+		KHOSER_STORE_PTR(cur->hdr.next, chunk);
+	return chunk;
+
+err_free:
+	kfree(chunk);
+	return NULL;
+}
+
+static void kho_vmalloc_free_chunks(struct kho_vmalloc_chunk *first_chunk)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	struct kho_vmalloc_chunk *chunk = first_chunk;
+
+	while (chunk) {
+		unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
+		struct kho_vmalloc_chunk *tmp = chunk;
+
+		__kho_unpreserve(track, pfn, pfn + 1);
+
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		kfree(tmp);
+	}
+}
+
+/**
+ * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
+ * @ptr: pointer to the area in vmalloc address space
+ * @preservation: returned physical address of preservation metadata
+ *
+ * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
+ * physical pages mapped at @ptr will be preserved and on successful return
+ * @preservation will hold the physical address of a structure that describes
+ * the preservation.
+ *
+ * NOTE: The memory allocated with vmalloc_node() variants cannot be reliably
+ * restored on the same node
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	struct kho_vmalloc_chunk *chunk, *first_chunk;
+	struct vm_struct *vm = find_vm_area(ptr);
+	unsigned int order, flags;
+	int err;
+
+	if (!vm)
+		return -EINVAL;
+
+	if (vm->flags & ~KHO_VMALLOC_FLAGS_MASK)
+		return -EOPNOTSUPP;
+
+	flags = vm->flags & KHO_VMALLOC_FLAGS_MASK;
+	order = get_vm_area_page_order(vm);
+
+	chunk = new_vmalloc_chunk(NULL);
+	if (!chunk)
+		return -ENOMEM;
+	first_chunk = chunk;
+	first_chunk->hdr.total_pages = vm->nr_pages;
+	first_chunk->hdr.flags = flags;
+	first_chunk->hdr.order = order;
+
+	for (int i = 0; i < vm->nr_pages; i += (1 << order)) {
+		phys_addr_t phys = page_to_phys(vm->pages[i]);
+
+		err = __kho_preserve_order(track, PHYS_PFN(phys), order);
+		if (err)
+			goto err_free;
+
+		chunk->phys[chunk->hdr.num_elms] = phys;
+		chunk->hdr.num_elms++;
+		if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->phys)) {
+			chunk = new_vmalloc_chunk(chunk);
+			if (!chunk)
+				goto err_free;
+		}
+	}
+
+	*preservation = virt_to_phys(first_chunk);
+	return 0;
+
+err_free:
+	kho_vmalloc_free_chunks(first_chunk);
+	return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
+
+/**
+ * kho_restore_vmalloc - recreates and populates an area in vmalloc address
+ * space from the preserved memory.
+ * @preservation: physical address of the preservation metadata.
+ *
+ * Recreates an area in vmalloc address space and populates it with memory that
+ * was preserved using kho_preserve_vmalloc().
+ *
+ * Return: pointer to the area in the vmalloc address space, NULL on failure.
+ */
+void *kho_restore_vmalloc(phys_addr_t preservation)
+{
+	struct kho_vmalloc_chunk *chunk = phys_to_virt(preservation);
+	unsigned int align, order, shift, flags;
+	unsigned int idx = 0, nr;
+	unsigned long addr, size;
+	struct vm_struct *area;
+	struct page **pages;
+	int err;
+
+	flags = chunk->hdr.flags;
+	if (flags & ~KHO_VMALLOC_FLAGS_MASK)
+		return NULL;
+
+	nr = chunk->hdr.total_pages;
+	pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+	order = chunk->hdr.order;
+	shift = PAGE_SHIFT + order;
+	align = 1 << shift;
+
+	while (chunk) {
+		struct page *page;
+
+		for (int i = 0; i < chunk->hdr.num_elms; i++) {
+			phys_addr_t phys = chunk->phys[i];
+
+			for (int j = 0; j < (1 << order); j++) {
+				page = phys_to_page(phys);
+				kho_restore_page(page, 0);
+				pages[idx++] = page;
+				phys += PAGE_SIZE;
+			}
+		}
+
+		page = virt_to_page(chunk);
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		kho_restore_page(page, 0);
+		__free_page(page);
+	}
+
+	area = __get_vm_area_node(nr * PAGE_SIZE, align, shift, flags,
+				  VMALLOC_START, VMALLOC_END, NUMA_NO_NODE,
+				  GFP_KERNEL, __builtin_return_address(0));
+	if (!area)
+		goto err_free_pages_array;
+
+	addr = (unsigned long)area->addr;
+	size = get_vm_area_size(area);
+	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
+	if (err)
+		goto err_free_vm_area;
+
+	return area->addr;
+
+err_free_vm_area:
+	free_vm_area(area);
+err_free_pages_array:
+	kvfree(pages);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
+
 /* Handling for debug/kho/out */
 
 static struct dentry *debugfs_root;
-- 
2.50.1

From nobody Tue Sep 9 16:20:04 2025
From: Mike Rapoport
To: Andrew Morton
Cc: Alexander Graf, Baoquan He, Changyuan Lyu, Chris Li, Jason Gunthorpe,
 Mike Rapoport, Pasha Tatashin, Pratyush Yadav, kexec@lists.infradead.org,
 linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 1/2] kho: add support for preserving vmalloc allocations
Date: Sun, 7 Sep 2025 10:00:21 +0300
Message-ID: <20250907070022.2177974-5-rppt@kernel.org>
In-Reply-To: <20250907070022.2177974-1-rppt@kernel.org>
References: <20250907070022.2177974-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

A vmalloc allocation is preserved using a binary structure similar to the
global KHO memory tracker: a linked list of pages where each page is an
array of physical addresses of the pages in the vmalloc area.

kho_preserve_vmalloc() hands out the physical address of the head page to
the caller. This address is used as the argument to kho_restore_vmalloc()
to restore the mapping in the vmalloc address space and populate it with
the preserved pages.
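
For a sense of scale, a back-of-the-envelope estimate of the metadata
overhead, assuming 4 KiB pages, a 64-bit phys_addr_t and an 8-byte
serialized next pointer (the exact figures depend on struct padding):

        sizeof(struct kho_vmalloc_hdr)  ~ 8 + 4 + 4 + 2 + 2 + padding = 24 bytes
        KHO_VMALLOC_SIZE                = (4096 - 24) / 8 ~ 509 entries per chunk

so preserving a 1 GiB vmalloc area built from order-0 pages (262144 pages)
needs roughly 516 chunks, i.e. about 2 MiB of preservation metadata.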

Signed-off-by: Mike Rapoport (Microsoft)
---
 include/linux/kexec_handover.h |  12 ++
 kernel/kexec_handover.c        | 200 +++++++++++++++++++++++++++++++++
 2 files changed, 212 insertions(+)

diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 348844cffb13..b7bf3bf11019 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -42,8 +42,10 @@ struct kho_serialization;
 bool kho_is_enabled(void);
 
 int kho_preserve_folio(struct folio *folio);
+int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation);
 int kho_preserve_phys(phys_addr_t phys, size_t size);
 struct folio *kho_restore_folio(phys_addr_t phys);
+void *kho_restore_vmalloc(phys_addr_t preservation);
 int kho_add_subtree(struct kho_serialization *ser, const char *name, void *fdt);
 int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
 
@@ -70,11 +72,21 @@ static inline int kho_preserve_phys(phys_addr_t phys, size_t size)
 	return -EOPNOTSUPP;
 }
 
+static inline int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline struct folio *kho_restore_folio(phys_addr_t phys)
 {
 	return NULL;
 }
 
+static inline void *kho_restore_vmalloc(phys_addr_t preservation)
+{
+	return NULL;
+}
+
 static inline int kho_add_subtree(struct kho_serialization *ser,
 				  const char *name, void *fdt)
 {
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index 8079fc4b9189..1177cc5ffa1a 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 
 #include
 
@@ -742,6 +743,205 @@ int kho_preserve_phys(phys_addr_t phys, size_t size)
 }
 EXPORT_SYMBOL_GPL(kho_preserve_phys);
 
+struct kho_vmalloc_chunk;
+
+struct kho_vmalloc_hdr {
+	DECLARE_KHOSER_PTR(next, struct kho_vmalloc_chunk *);
+	unsigned int total_pages;	/* only valid in the first chunk */
+	unsigned int flags;		/* only valid in the first chunk */
+	unsigned short order;		/* only valid in the first chunk */
+	unsigned short num_elms;
+};
+
+#define KHO_VMALLOC_SIZE \
+	((PAGE_SIZE - sizeof(struct kho_vmalloc_hdr)) / \
+	 sizeof(phys_addr_t))
+
+struct kho_vmalloc_chunk {
+	struct kho_vmalloc_hdr hdr;
+	phys_addr_t phys[KHO_VMALLOC_SIZE];
+};
+
+static_assert(sizeof(struct kho_vmalloc_chunk) == PAGE_SIZE);
+
+#define KHO_VMALLOC_FLAGS_MASK	(VM_ALLOC | VM_ALLOW_HUGE_VMAP)
+
+static struct kho_vmalloc_chunk *new_vmalloc_chunk(struct kho_vmalloc_chunk *cur)
+{
+	struct kho_vmalloc_chunk *chunk;
+	int err;
+
+	chunk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!chunk)
+		return NULL;
+
+	err = kho_preserve_phys(virt_to_phys(chunk), PAGE_SIZE);
+	if (err)
+		goto err_free;
+	if (cur)
+		KHOSER_STORE_PTR(cur->hdr.next, chunk);
+	return chunk;
+
+err_free:
+	kfree(chunk);
+	return NULL;
+}
+
+static void kho_vmalloc_free_chunks(struct kho_vmalloc_chunk *first_chunk)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	struct kho_vmalloc_chunk *chunk = first_chunk;
+
+	while (chunk) {
+		unsigned long pfn = PHYS_PFN(virt_to_phys(chunk));
+		struct kho_vmalloc_chunk *tmp = chunk;
+
+		__kho_unpreserve(track, pfn, pfn + 1);
+
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		kfree(tmp);
+	}
+}
+
+/**
+ * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec
+ * @ptr: pointer to the area in vmalloc address space
+ * @preservation: returned physical address of preservation metadata
+ *
+ * Instructs KHO to preserve the area in vmalloc address space at @ptr. The
+ * physical pages mapped at @ptr will be preserved and on successful return
+ * @preservation will hold the physical address of a structure that describes
+ * the preservation.
+ *
+ * NOTE: The memory allocated with vmalloc_node() variants cannot be reliably
+ * restored on the same node
+ *
+ * Return: 0 on success, error code on failure
+ */
+int kho_preserve_vmalloc(void *ptr, phys_addr_t *preservation)
+{
+	struct kho_mem_track *track = &kho_out.ser.track;
+	struct kho_vmalloc_chunk *chunk, *first_chunk;
+	struct vm_struct *vm = find_vm_area(ptr);
+	unsigned int order, flags;
+	int err;
+
+	if (!vm)
+		return -EINVAL;
+
+	if (vm->flags & ~KHO_VMALLOC_FLAGS_MASK)
+		return -EOPNOTSUPP;
+
+	flags = vm->flags & KHO_VMALLOC_FLAGS_MASK;
+	order = get_vm_area_page_order(vm);
+
+	chunk = new_vmalloc_chunk(NULL);
+	if (!chunk)
+		return -ENOMEM;
+	first_chunk = chunk;
+	first_chunk->hdr.total_pages = vm->nr_pages;
+	first_chunk->hdr.flags = flags;
+	first_chunk->hdr.order = order;
+
+	for (int i = 0; i < vm->nr_pages; i += (1 << order)) {
+		phys_addr_t phys = page_to_phys(vm->pages[i]);
+
+		err = __kho_preserve_order(track, PHYS_PFN(phys), order);
+		if (err)
+			goto err_free;
+
+		chunk->phys[chunk->hdr.num_elms] = phys;
+		chunk->hdr.num_elms++;
+		if (chunk->hdr.num_elms == ARRAY_SIZE(chunk->phys)) {
+			chunk = new_vmalloc_chunk(chunk);
+			if (!chunk)
+				goto err_free;
+		}
+	}
+
+	*preservation = virt_to_phys(first_chunk);
+	return 0;
+
+err_free:
+	kho_vmalloc_free_chunks(first_chunk);
+	return err;
+}
+EXPORT_SYMBOL_GPL(kho_preserve_vmalloc);
+
+/**
+ * kho_restore_vmalloc - recreates and populates an area in vmalloc address
+ * space from the preserved memory.
+ * @preservation: physical address of the preservation metadata.
+ *
+ * Recreates an area in vmalloc address space and populates it with memory that
+ * was preserved using kho_preserve_vmalloc().
+ *
+ * Return: pointer to the area in the vmalloc address space, NULL on failure.
+ */
+void *kho_restore_vmalloc(phys_addr_t preservation)
+{
+	struct kho_vmalloc_chunk *chunk = phys_to_virt(preservation);
+	unsigned int align, order, shift, flags;
+	unsigned int idx = 0, nr;
+	unsigned long addr, size;
+	struct vm_struct *area;
+	struct page **pages;
+	int err;
+
+	flags = chunk->hdr.flags;
+	if (flags & ~KHO_VMALLOC_FLAGS_MASK)
+		return NULL;
+
+	nr = chunk->hdr.total_pages;
+	pages = kvmalloc_array(nr, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+	order = chunk->hdr.order;
+	shift = PAGE_SHIFT + order;
+	align = 1 << shift;
+
+	while (chunk) {
+		struct page *page;
+
+		for (int i = 0; i < chunk->hdr.num_elms; i++) {
+			phys_addr_t phys = chunk->phys[i];
+
+			for (int j = 0; j < (1 << order); j++) {
+				page = phys_to_page(phys);
+				kho_restore_page(page, 0);
+				pages[idx++] = page;
+				phys += PAGE_SIZE;
+			}
+		}
+
+		page = virt_to_page(chunk);
+		chunk = KHOSER_LOAD_PTR(chunk->hdr.next);
+		kho_restore_page(page, 0);
+		__free_page(page);
+	}
+
+	area = __get_vm_area_node(nr * PAGE_SIZE, align, shift, flags,
+				  VMALLOC_START, VMALLOC_END, NUMA_NO_NODE,
+				  GFP_KERNEL, __builtin_return_address(0));
+	if (!area)
+		goto err_free_pages_array;
+
+	addr = (unsigned long)area->addr;
+	size = get_vm_area_size(area);
+	err = vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, shift);
+	if (err)
+		goto err_free_vm_area;
+
+	return area->addr;
+
+err_free_vm_area:
+	free_vm_area(area);
+err_free_pages_array:
+	kvfree(pages);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(kho_restore_vmalloc);
+
 /* Handling for debug/kho/out */
 
 static struct dentry *debugfs_root;
-- 
2.50.1

From nobody Tue Sep 9 16:20:04 2025
From: Mike Rapoport
To: Andrew Morton
Cc: Alexander Graf, Baoquan He, Changyuan Lyu, Chris Li, Jason Gunthorpe,
 Mike Rapoport, Pasha Tatashin, Pratyush Yadav, kexec@lists.infradead.org,
 linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v2 2/2] lib/test_kho: use kho_preserve_vmalloc instead of storing addresses in fdt
Date: Sun, 7 Sep 2025 10:00:19 +0300
Message-ID: <20250907070022.2177974-3-rppt@kernel.org>
In-Reply-To: <20250907070022.2177974-1-rppt@kernel.org>
References: <20250907070022.2177974-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

The KHO test stores physical addresses of the preserved folios directly in
the fdt. Use kho_preserve_vmalloc() to preserve the array of addresses
instead, and kho_restore_vmalloc() to retrieve them after kexec.

This makes the test more scalable on the one hand and adds test coverage
for kho_preserve_vmalloc() on the other.
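
In essence, the test now round-trips a single physical address through its
FDT node instead of the whole array. A condensed sketch of that flow, with
hypothetical helper names and unrelated properties omitted (the real test
keeps the array in its state struct and frees it during cleanup):

        #include <linux/kexec_handover.h>
        #include <linux/libfdt.h>
        #include <linux/vmalloc.h>

        /* Save side: preserve the array, record only its handle in the FDT. */
        static int example_save(void *fdt, unsigned int nr_folios)
        {
                phys_addr_t *folios_info;
                phys_addr_t handle;
                int err;

                folios_info = vmalloc_array(nr_folios, sizeof(*folios_info));
                if (!folios_info)
                        return -ENOMEM;

                err = kho_preserve_vmalloc(folios_info, &handle);
                if (err) {
                        vfree(folios_info);
                        return err;
                }

                /* ... fill folios_info[] with the preserved folios ... */

                return fdt_property(fdt, "folios_info", &handle, sizeof(handle));
        }

        /* Restore side: read the handle back and let KHO rebuild the area. */
        static phys_addr_t *example_restore(const void *fdt, int node)
        {
                const phys_addr_t *handle;
                int len;

                handle = fdt_getprop(fdt, node, "folios_info", &len);
                if (!handle || len != sizeof(*handle))
                        return NULL;

                return kho_restore_vmalloc(*handle);
        }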

Signed-off-by: Mike Rapoport (Microsoft)
---
 lib/test_kho.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/lib/test_kho.c b/lib/test_kho.c
index c2eb899c3b45..10045f5979a0 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -32,6 +32,7 @@ module_param(max_mem, long, 0644);
 struct kho_test_state {
 	unsigned int nr_folios;
 	struct folio **folios;
+	phys_addr_t *folios_info;
 	struct folio *fdt;
 	__wsum csum;
 };
@@ -68,13 +69,17 @@ static struct notifier_block kho_test_nb = {
 static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 {
 	phys_addr_t *folios_info __free(kvfree) = NULL;
+	phys_addr_t folios_info_phys;
 	int err = 0;
 
-	folios_info = kvmalloc_array(state->nr_folios, sizeof(*folios_info),
-				     GFP_KERNEL);
+	folios_info = vmalloc_array(state->nr_folios, sizeof(*folios_info));
 	if (!folios_info)
 		return -ENOMEM;
 
+	err = kho_preserve_vmalloc(folios_info, &folios_info_phys);
+	if (err)
+		return err;
+
 	for (int i = 0; i < state->nr_folios; i++) {
 		struct folio *folio = state->folios[i];
 		unsigned int order = folio_order(folio);
@@ -89,11 +94,14 @@ static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 	err |= fdt_begin_node(fdt, "data");
 	err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
 			    sizeof(state->nr_folios));
-	err |= fdt_property(fdt, "folios_info", folios_info,
-			    state->nr_folios * sizeof(*folios_info));
+	err |= fdt_property(fdt, "folios_info", &folios_info_phys,
+			    sizeof(folios_info_phys));
 	err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
 	err |= fdt_end_node(fdt);
 
+	if (!err)
+		state->folios_info = no_free_ptr(folios_info);
+
 	return err;
 }
 
@@ -197,7 +205,8 @@ static int kho_test_save(void)
 static int kho_test_restore_data(const void *fdt, int node)
 {
 	const unsigned int *nr_folios;
-	const phys_addr_t *folios_info;
+	const phys_addr_t *folios_info_phys;
+	phys_addr_t *folios_info;
 	const __wsum *old_csum;
 	__wsum csum = 0;
 	int len;
@@ -212,8 +221,12 @@ static int kho_test_restore_data(const void *fdt, int node)
 	if (!old_csum || len != sizeof(*old_csum))
 		return -EINVAL;
 
-	folios_info = fdt_getprop(fdt, node, "folios_info", &len);
-	if (!folios_info || len != sizeof(*folios_info) * *nr_folios)
+	folios_info_phys = fdt_getprop(fdt, node, "folios_info", &len);
+	if (!folios_info_phys || len != sizeof(*folios_info_phys))
+		return -EINVAL;
+
+	folios_info = kho_restore_vmalloc(*folios_info_phys);
+	if (!folios_info)
 		return -EINVAL;
 
 	for (int i = 0; i < *nr_folios; i++) {
@@ -233,6 +246,8 @@ static int kho_test_restore_data(const void *fdt, int node)
 		folio_put(folio);
 	}
 
+	vfree(folios_info);
+
 	if (csum != *old_csum)
 		return -EINVAL;
 
@@ -291,6 +306,7 @@ static void kho_test_cleanup(void)
 		folio_put(kho_test_state.folios[i]);
 
 	kvfree(kho_test_state.folios);
+	vfree(kho_test_state.folios_info);
 }
 
 static void __exit kho_test_exit(void)
-- 
2.50.1

From nobody Tue Sep 9 16:20:04 2025
From: Mike Rapoport
To: Andrew Morton
Cc: Alexander Graf, Baoquan He, Changyuan Lyu, Chris Li, Jason Gunthorpe,
 Mike Rapoport, Pasha Tatashin, Pratyush Yadav, kexec@lists.infradead.org,
 linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: [PATCH v3 2/2] lib/test_kho: use kho_preserve_vmalloc instead of storing addresses in fdt
Date: Sun, 7 Sep 2025 10:00:22 +0300
Message-ID: <20250907070022.2177974-6-rppt@kernel.org>
In-Reply-To: <20250907070022.2177974-1-rppt@kernel.org>
References: <20250907070022.2177974-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

The KHO test stores physical addresses of the preserved folios directly in
the fdt. Use kho_preserve_vmalloc() to preserve the array of addresses
instead, and kho_restore_vmalloc() to retrieve them after kexec.

This makes the test more scalable on the one hand and adds test coverage
for kho_preserve_vmalloc() on the other.

Signed-off-by: Mike Rapoport (Microsoft)
---
 lib/test_kho.c | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/lib/test_kho.c b/lib/test_kho.c
index fe8504e3407b..c46f577b6aee 100644
--- a/lib/test_kho.c
+++ b/lib/test_kho.c
@@ -32,6 +32,7 @@ module_param(max_mem, long, 0644);
 struct kho_test_state {
 	unsigned int nr_folios;
 	struct folio **folios;
+	phys_addr_t *folios_info;
 	struct folio *fdt;
 	__wsum csum;
 };
@@ -67,18 +68,15 @@ static struct notifier_block kho_test_nb = {
 
 static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 {
-	phys_addr_t *folios_info;
+	phys_addr_t *folios_info __free(kvfree) = NULL;
+	phys_addr_t folios_info_phys;
 	int err = 0;
 
-	err |= fdt_begin_node(fdt, "data");
-	err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
-		       sizeof(state->nr_folios));
-	err |= fdt_property_placeholder(fdt, "folios_info",
-					state->nr_folios * sizeof(*folios_info),
-					(void **)&folios_info);
-	err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
-	err |= fdt_end_node(fdt);
+	folios_info = vmalloc_array(state->nr_folios, sizeof(*folios_info));
+	if (!folios_info)
+		return -ENOMEM;
 
+	err = kho_preserve_vmalloc(folios_info, &folios_info_phys);
 	if (err)
 		return err;
 
@@ -93,6 +91,17 @@ static int kho_test_save_data(struct kho_test_state *state, void *fdt)
 			break;
 	}
 
+	err |= fdt_begin_node(fdt, "data");
+	err |= fdt_property(fdt, "nr_folios", &state->nr_folios,
+			    sizeof(state->nr_folios));
+	err |= fdt_property(fdt, "folios_info", &folios_info_phys,
+			    sizeof(folios_info_phys));
+	err |= fdt_property(fdt, "csum", &state->csum, sizeof(state->csum));
+	err |= fdt_end_node(fdt);
+
+	if (!err)
+		state->folios_info = no_free_ptr(folios_info);
+
 	return err;
 }
 
@@ -210,7 +219,8 @@ static int kho_test_save(void)
 static int kho_test_restore_data(const void *fdt, int node)
 {
 	const unsigned int *nr_folios;
-	const phys_addr_t *folios_info;
+	const phys_addr_t *folios_info_phys;
+	phys_addr_t *folios_info;
 	const __wsum *old_csum;
 	__wsum csum = 0;
 	int len;
@@ -225,8 +235,12 @@ static int kho_test_restore_data(const void *fdt, int node)
 	if (!old_csum || len != sizeof(*old_csum))
 		return -EINVAL;
 
-	folios_info = fdt_getprop(fdt, node, "folios_info", &len);
-	if (!folios_info || len != sizeof(*folios_info) * *nr_folios)
+	folios_info_phys = fdt_getprop(fdt, node, "folios_info", &len);
+	if (!folios_info_phys || len != sizeof(*folios_info_phys))
+		return -EINVAL;
+
+	folios_info = kho_restore_vmalloc(*folios_info_phys);
+	if (!folios_info)
 		return -EINVAL;
 
 	for (int i = 0; i < *nr_folios; i++) {
@@ -246,6 +260,8 @@ static int kho_test_restore_data(const void *fdt, int node)
 		folio_put(folio);
 	}
 
+	vfree(folios_info);
+
 	if (csum != *old_csum)
 		return -EINVAL;
 
@@ -304,6 +320,7 @@ static void kho_test_cleanup(void)
 		folio_put(kho_test_state.folios[i]);
 
 	kvfree(kho_test_state.folios);
+	vfree(kho_test_state.folios_info);
 	folio_put(kho_test_state.fdt);
 }
 
-- 
2.50.1