Keep the alternatives-related sections, located at the beginning of the
.init section, around if they may be needed for runtime patching.
Signed-off-by: David Kaplan <david.kaplan@amd.com>
---
arch/x86/mm/init.c | 12 ++++++++++--
arch/x86/mm/mm_internal.h | 2 ++
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8bf6ad4b9400..8dfde4889a09 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -977,8 +977,16 @@ void __ref free_initmem(void)
mem_encrypt_free_decrypted_mem();
- free_kernel_image_pages("unused kernel image (initmem)",
- &__init_begin, &__init_end);
+ /*
+ * __init_alt_end is after the alternative sections in case we need to
+ * keep that around to support runtime patching.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_MITIGATIONS))
+ free_kernel_image_pages("unused kernel image (initmem)",
+ &__init_alt_end, &__init_end);
+ else
+ free_kernel_image_pages("unused kernel image (initmem)",
+ &__init_begin, &__init_end);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 097aadc250f7..e961f2257009 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -29,4 +29,6 @@ extern unsigned long tlb_single_page_flush_ceiling;
void __init x86_numa_init(void);
#endif
+extern void *__init_alt_end;
+
#endif /* __X86_MM_INTERNAL_H */
--
2.34.1