Use the newly introduced crash_prepare_headers() function to replace
the existing prepare_elf_headers(); allocate cmem and exclude crash kernel
memory in the crash core, which reduces code duplication.
Only the following three architecture functions need to be implemented:
- arch_get_system_nr_ranges(). Call get_nr_ram_ranges_callback()
to pre-count the max number of memory ranges.
- arch_crash_populate_cmem(). Use prepare_elf64_ram_headers_callback()
to collect the memory ranges and fill them into cmem.
- arch_crash_exclude_ranges(). Exclude the low 1M for x86.
In addition, remove the now-unused local variable "nr_mem_ranges" in
arch_crash_handle_hotplug_event().
Cc: Thomas Gleixner <tglx@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Reviewed-by: Sourabh Jain <sourabhjain@linux.ibm.com>
Acked-by: Baoquan He <bhe@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
arch/x86/kernel/crash.c | 89 +++++------------------------------------
1 file changed, 11 insertions(+), 78 deletions(-)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 7fa6d45ebe3f..10ef24611f2a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -152,16 +152,8 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
return 0;
}
-/* Gather all the required information to prepare elf headers for ram regions */
-static struct crash_mem *fill_up_crash_elf_data(void)
+unsigned int arch_get_system_nr_ranges(void)
{
- unsigned int nr_ranges = 0;
- struct crash_mem *cmem;
-
- walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
- if (!nr_ranges)
- return NULL;
-
/*
* Exclusion of crash region, crashk_low_res and/or crashk_cma_ranges
* may cause range splits. So add extra slots here.
@@ -176,49 +168,16 @@ static struct crash_mem *fill_up_crash_elf_data(void)
* But in order to lest the low 1M could be changed in the future,
* (e.g. [start, 1M]), add a extra slot.
*/
- nr_ranges += 3 + crashk_cma_cnt;
- cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
- if (!cmem)
- return NULL;
-
- cmem->max_nr_ranges = nr_ranges;
+ unsigned int nr_ranges = 3 + crashk_cma_cnt;
- return cmem;
+ walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
+ return nr_ranges;
}
-/*
- * Look for any unwanted ranges between mstart, mend and remove them. This
- * might lead to split and split ranges are put in cmem->ranges[] array
- */
-static int elf_header_exclude_ranges(struct crash_mem *cmem)
+int arch_crash_exclude_ranges(struct crash_mem *cmem)
{
- int ret = 0;
- int i;
-
/* Exclude the low 1M because it is always reserved */
- ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
- if (ret)
- return ret;
-
- /* Exclude crashkernel region */
- ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
- if (ret)
- return ret;
-
- if (crashk_low_res.end)
- ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
- crashk_low_res.end);
- if (ret)
- return ret;
-
- for (i = 0; i < crashk_cma_cnt; ++i) {
- ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
- crashk_cma_ranges[i].end);
- if (ret)
- return ret;
- }
-
- return 0;
+ return crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
}
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
@@ -235,35 +194,9 @@ static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
return 0;
}
-/* Prepare elf headers. Return addr and size */
-static int prepare_elf_headers(void **addr, unsigned long *sz,
- unsigned long *nr_mem_ranges)
+int arch_crash_populate_cmem(struct crash_mem *cmem)
{
- struct crash_mem *cmem;
- int ret;
-
- cmem = fill_up_crash_elf_data();
- if (!cmem)
- return -ENOMEM;
-
- ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
- if (ret)
- goto out;
-
- /* Exclude unwanted mem ranges */
- ret = elf_header_exclude_ranges(cmem);
- if (ret)
- goto out;
-
- /* Return the computed number of memory ranges, for hotplug usage */
- *nr_mem_ranges = cmem->nr_ranges;
-
- /* By default prepare 64bit headers */
- ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
-
-out:
- vfree(cmem);
- return ret;
+ return walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
}
#endif
@@ -421,7 +354,8 @@ int crash_load_segments(struct kimage *image)
.buf_max = ULONG_MAX, .top_down = false };
/* Prepare elf headers and add a segment */
- ret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum);
+ ret = crash_prepare_headers(IS_ENABLED(CONFIG_X86_64), &kbuf.buffer,
+ &kbuf.bufsz, &pnum);
if (ret)
return ret;
@@ -514,7 +448,6 @@ unsigned int arch_crash_get_elfcorehdr_size(void)
void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{
void *elfbuf = NULL, *old_elfcorehdr;
- unsigned long nr_mem_ranges;
unsigned long mem, memsz;
unsigned long elfsz = 0;
@@ -532,7 +465,7 @@ void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
* Create the new elfcorehdr reflecting the changes to CPU and/or
* memory resources.
*/
- if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
+ if (crash_prepare_headers(IS_ENABLED(CONFIG_X86_64), &elfbuf, &elfsz, NULL)) {
pr_err("unable to create new elfcorehdr");
goto out;
}
--
2.34.1