Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
crashkernel= command line option") and commit ab475510e042 ("kdump:
implement reserve_crashkernel_cma") added CMA support for kdump
crashkernel reservation.
Crash kernel memory reservation wastes production resources if too
large, risks kdump failure if too small, and faces allocation difficulties
on fragmented systems due to contiguous block constraints. The new
CMA-based crashkernel reservation scheme splits the "large fixed
reservation" into a "small fixed region + large CMA dynamic region": the
CMA memory is available to userspace during normal operation to avoid
waste, and is reclaimed for kdump upon crash—saving memory while
improving reliability.
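For example, instead of one large static reservation, such a system could boot
with something like (sizes purely illustrative):

    crashkernel=128M crashkernel=512M,cma

so that only the small fixed region is permanently withheld from the first
kernel, while the CMA part keeps serving userspace/movable allocations until a
crash actually happens.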
So extend crashkernel CMA reservation support to arm64. The following
changes are made to enable CMA reservation:
- Parse and obtain the CMA reservation size along with other crashkernel
parameters.
- Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
- Include the CMA-reserved ranges for kdump kernel to use.
- Exclude the CMA-reserved ranges from the crash kernel memory to
prevent them from being exported through /proc/vmcore.
Update kernel-parameters.txt to document CMA support for crashkernel on
arm64 architecture.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
v2:
- Free cmem in prepare_elf_headers()
- Add the motivation.
---
Documentation/admin-guide/kernel-parameters.txt | 2 +-
arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
arch/arm64/mm/init.c | 5 +++--
3 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1058f2a6d6a8..36bb642a7edd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1119,7 +1119,7 @@ Kernel parameters
                         It will be ignored when crashkernel=X,high is not used
                         or memory reserved is below 4G.
         crashkernel=size[KMG],cma
-                        [KNL, X86, ppc] Reserve additional crash kernel memory from
+                        [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
                         CMA. This reservation is usable by the first system's
                         userspace memory and kernel movable allocations (memory
                         balloon, zswap). Pages allocated from this memory range
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 410060ebd86d..ef6ce9aaba80 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
         u64 i;
         phys_addr_t start, end;

-        nr_ranges = 2; /* for exclusion of crashkernel region */
+        nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
         for_each_mem_range(i, &start, &end)
                 nr_ranges++;

@@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
                 cmem->nr_ranges++;
         }

+        for (i = 0; i < crashk_cma_cnt; i++) {
+                cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
+                cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
+                cmem->nr_ranges++;
+        }
+
         /* Exclude crashkernel region */
         ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
         if (ret)
@@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
                         goto out;
         }

+        for (i = 0; i < crashk_cma_cnt; ++i) {
+                ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+                                              crashk_cma_ranges[i].end);
+                if (ret)
+                        goto out;
+        }
+
         ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

 out:
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 524d34a0e921..28165d94af08 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;

 static void __init arch_reserve_crashkernel(void)
 {
+        unsigned long long crash_base, crash_size, cma_size = 0;
         unsigned long long low_size = 0;
-        unsigned long long crash_base, crash_size;
         bool high = false;
         int ret;

@@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)

         ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                 &crash_size, &crash_base,
-                                &low_size, NULL, &high);
+                                &low_size, &cma_size, &high);
         if (ret)
                 return;

         reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
+        reserve_crashkernel_cma(cma_size);
 }

 static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
--
2.34.1
On Mon, Jan 26, 2026 at 04:13:34PM +0800, Jinjie Ruan wrote:
> Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
> crashkernel= command line option") and commit ab475510e042 ("kdump:
> implement reserve_crashkernel_cma") added CMA support for kdump
> crashkernel reservation.
>
> Crash kernel memory reservation wastes production resources if too
> large, risks kdump failure if too small, and faces allocation difficulties
> on fragmented systems due to contiguous block constraints. The new
> CMA-based crashkernel reservation scheme splits the "large fixed
> reservation" into a "small fixed region + large CMA dynamic region": the
> CMA memory is available to userspace during normal operation to avoid
> waste, and is reclaimed for kdump upon crash—saving memory while
> improving reliability.
>
> So extend crashkernel CMA reservation support to arm64. The following
> changes are made to enable CMA reservation:
>
> - Parse and obtain the CMA reservation size along with other crashkernel
> parameters.
> - Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
> - Include the CMA-reserved ranges for kdump kernel to use.
> - Exclude the CMA-reserved ranges from the crash kernel memory to
> prevent them from being exported through /proc/vmcore.
>
> Update kernel-parameters.txt to document CMA support for crashkernel on
> arm64 architecture.
I'm looking at this and at an almost identical patch for riscv
https://lore.kernel.org/all/20260126080738.696723-1-ruanjinjie@huawei.com
and it feels wrong that we have to duplicate the code that excludes the CMA
ranges.
The CMA ranges are known to crash_core and I don't see why we cannot
exclude them there.
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> ---
> v2:
> - Free cmem in prepare_elf_headers()
> - Add the mtivation.
> ---
> Documentation/admin-guide/kernel-parameters.txt | 2 +-
> arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
> arch/arm64/mm/init.c | 5 +++--
> 3 files changed, 18 insertions(+), 4 deletions(-)
>
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index 1058f2a6d6a8..36bb642a7edd 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -1119,7 +1119,7 @@ Kernel parameters
> It will be ignored when crashkernel=X,high is not used
> or memory reserved is below 4G.
> crashkernel=size[KMG],cma
> - [KNL, X86, ppc] Reserve additional crash kernel memory from
> + [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
> CMA. This reservation is usable by the first system's
> userspace memory and kernel movable allocations (memory
> balloon, zswap). Pages allocated from this memory range
> diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
> index 410060ebd86d..ef6ce9aaba80 100644
> --- a/arch/arm64/kernel/machine_kexec_file.c
> +++ b/arch/arm64/kernel/machine_kexec_file.c
> @@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> u64 i;
> phys_addr_t start, end;
>
> - nr_ranges = 2; /* for exclusion of crashkernel region */
> + nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
> for_each_mem_range(i, &start, &end)
> nr_ranges++;
>
> @@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> cmem->nr_ranges++;
> }
>
> + for (i = 0; i < crashk_cma_cnt; i++) {
> + cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
> + cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
> + cmem->nr_ranges++;
> + }
> +
> /* Exclude crashkernel region */
> ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
> if (ret)
> @@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> goto out;
> }
>
> + for (i = 0; i < crashk_cma_cnt; ++i) {
> + ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
> + crashk_cma_ranges[i].end);
> + if (ret)
> + goto out;
> + }
> +
> ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
>
> out:
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 524d34a0e921..28165d94af08 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
>
> static void __init arch_reserve_crashkernel(void)
> {
> + unsigned long long crash_base, crash_size, cma_size = 0;
> unsigned long long low_size = 0;
> - unsigned long long crash_base, crash_size;
> bool high = false;
> int ret;
>
> @@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
>
> ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
> &crash_size, &crash_base,
> - &low_size, NULL, &high);
> + &low_size, &cma_size, &high);
> if (ret)
> return;
>
> reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
> + reserve_crashkernel_cma(cma_size);
> }
>
> static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
> --
> 2.34.1
>
--
Sincerely yours,
Mike.
On 2026/1/28 16:31, Mike Rapoport wrote:
> On Mon, Jan 26, 2026 at 04:13:34PM +0800, Jinjie Ruan wrote:
>> Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
>> crashkernel= command line option") and commit ab475510e042 ("kdump:
>> implement reserve_crashkernel_cma") added CMA support for kdump
>> crashkernel reservation.
>>
>> Crash kernel memory reservation wastes production resources if too
>> large, risks kdump failure if too small, and faces allocation difficulties
>> on fragmented systems due to contiguous block constraints. The new
>> CMA-based crashkernel reservation scheme splits the "large fixed
>> reservation" into a "small fixed region + large CMA dynamic region": the
>> CMA memory is available to userspace during normal operation to avoid
>> waste, and is reclaimed for kdump upon crash—saving memory while
>> improving reliability.
>>
>> So extend crashkernel CMA reservation support to arm64. The following
>> changes are made to enable CMA reservation:
>>
>> - Parse and obtain the CMA reservation size along with other crashkernel
>> parameters.
>> - Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
>> - Include the CMA-reserved ranges for kdump kernel to use.
>> - Exclude the CMA-reserved ranges from the crash kernel memory to
>> prevent them from being exported through /proc/vmcore.
>>
>> Update kernel-parameters.txt to document CMA support for crashkernel on
>> arm64 architecture.
>
> I'm looking at this and at almost identical patch for riscv
> https://lore.kernel.org/all/20260126080738.696723-1-ruanjinjie@huawei.com
> and it feels wrong that we have duplicate the code that excludes cma
> ranges.
> CMA ranges are known to the crash_core and I don't see why we cannot
> exclude them there.
You are right, x86 and powerpc have similar implementations that
exclude the crashkernel CMA ranges.
x86 [1]
+        for (i = 0; i < crashk_cma_cnt; ++i) {
+                ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
+                                              crashk_cma_ranges[i].end);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
But powerpc [2] is a little different: it uses crash_exclude_mem_range_guarded(),
a wrapper around crash_exclude_mem_range() with extra checks and
realloc_mem_ranges().
+        for (i = 0; i < crashk_cma_cnt; ++i) {
+                ret = crash_exclude_mem_range_guarded(mem_ranges, crashk_cma_ranges[i].start,
+                                                      crashk_cma_ranges[i].end);
+                if (ret)
+                        goto out;
+        }
[1]: https://lore.kernel.org/all/ZWEAWMJtesa3O9M5@dwarf.suse.cz/
[2]:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b4a96ab50f368afc2360ff539a20254ca2c9a889
>
>> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
>> ---
>> v2:
>> - Free cmem in prepare_elf_headers()
>> - Add the mtivation.
>> ---
>> Documentation/admin-guide/kernel-parameters.txt | 2 +-
>> arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
>> arch/arm64/mm/init.c | 5 +++--
>> 3 files changed, 18 insertions(+), 4 deletions(-)
>>
>> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
>> index 1058f2a6d6a8..36bb642a7edd 100644
>> --- a/Documentation/admin-guide/kernel-parameters.txt
>> +++ b/Documentation/admin-guide/kernel-parameters.txt
>> @@ -1119,7 +1119,7 @@ Kernel parameters
>> It will be ignored when crashkernel=X,high is not used
>> or memory reserved is below 4G.
>> crashkernel=size[KMG],cma
>> - [KNL, X86, ppc] Reserve additional crash kernel memory from
>> + [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
>> CMA. This reservation is usable by the first system's
>> userspace memory and kernel movable allocations (memory
>> balloon, zswap). Pages allocated from this memory range
>> diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
>> index 410060ebd86d..ef6ce9aaba80 100644
>> --- a/arch/arm64/kernel/machine_kexec_file.c
>> +++ b/arch/arm64/kernel/machine_kexec_file.c
>> @@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
>> u64 i;
>> phys_addr_t start, end;
>>
>> - nr_ranges = 2; /* for exclusion of crashkernel region */
>> + nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
>> for_each_mem_range(i, &start, &end)
>> nr_ranges++;
>>
>> @@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
>> cmem->nr_ranges++;
>> }
>>
>> + for (i = 0; i < crashk_cma_cnt; i++) {
>> + cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
>> + cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
>> + cmem->nr_ranges++;
>> + }
>> +
>> /* Exclude crashkernel region */
>> ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
>> if (ret)
>> @@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
>> goto out;
>> }
>>
>> + for (i = 0; i < crashk_cma_cnt; ++i) {
>> + ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
>> + crashk_cma_ranges[i].end);
>> + if (ret)
>> + goto out;
>> + }
>> +
>> ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
>>
>> out:
>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
>> index 524d34a0e921..28165d94af08 100644
>> --- a/arch/arm64/mm/init.c
>> +++ b/arch/arm64/mm/init.c
>> @@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
>>
>> static void __init arch_reserve_crashkernel(void)
>> {
>> + unsigned long long crash_base, crash_size, cma_size = 0;
>> unsigned long long low_size = 0;
>> - unsigned long long crash_base, crash_size;
>> bool high = false;
>> int ret;
>>
>> @@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
>>
>> ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
>> &crash_size, &crash_base,
>> - &low_size, NULL, &high);
>> + &low_size, &cma_size, &high);
>> if (ret)
>> return;
>>
>> reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
>> + reserve_crashkernel_cma(cma_size);
>> }
>>
>> static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
>> --
>> 2.34.1
>>
>
On Wed, Jan 28, 2026 at 05:10:15PM +0800, Jinjie Ruan wrote:
>
>
> On 2026/1/28 16:31, Mike Rapoport wrote:
> > On Mon, Jan 26, 2026 at 04:13:34PM +0800, Jinjie Ruan wrote:
> >> Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
> >> crashkernel= command line option") and commit ab475510e042 ("kdump:
> >> implement reserve_crashkernel_cma") added CMA support for kdump
> >> crashkernel reservation.
> >>
> >> Crash kernel memory reservation wastes production resources if too
> >> large, risks kdump failure if too small, and faces allocation difficulties
> >> on fragmented systems due to contiguous block constraints. The new
> >> CMA-based crashkernel reservation scheme splits the "large fixed
> >> reservation" into a "small fixed region + large CMA dynamic region": the
> >> CMA memory is available to userspace during normal operation to avoid
> >> waste, and is reclaimed for kdump upon crash—saving memory while
> >> improving reliability.
> >>
> >> So extend crashkernel CMA reservation support to arm64. The following
> >> changes are made to enable CMA reservation:
> >>
> >> - Parse and obtain the CMA reservation size along with other crashkernel
> >> parameters.
> >> - Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
> >> - Include the CMA-reserved ranges for kdump kernel to use.
> >> - Exclude the CMA-reserved ranges from the crash kernel memory to
> >> prevent them from being exported through /proc/vmcore.
> >>
> >> Update kernel-parameters.txt to document CMA support for crashkernel on
> >> arm64 architecture.
> >
> > I'm looking at this and at almost identical patch for riscv
> > https://lore.kernel.org/all/20260126080738.696723-1-ruanjinjie@huawei.com
> > and it feels wrong that we have duplicate the code that excludes cma
> > ranges.
> > CMA ranges are known to the crash_core and I don't see why we cannot
> > exclude them there.
>
> Youa are right, x86 and powerpc has similar implementations that
> excludes crashkernel cma ranges.
>
> x86 [1]
>
> +        for (i = 0; i < crashk_cma_cnt; ++i) {
> +                ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
> +                                              crashk_cma_ranges[i].end);
> +                if (ret)
> +                        return ret;
> +        }
So if this loop were in crash_prepare_elf64_headers(), it would work for
arm64, riscv and x86, right?
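Something along these lines, say (an untested sketch only, built on the
crashk_cma_ranges[]/crashk_cma_cnt globals the generic code already has):

        /*
         * In the generic crash_prepare_elf64_headers(), where 'mem' is the
         * struct crash_mem passed in by the architecture:
         */
        for (int i = 0; i < crashk_cma_cnt; i++) {
                int ret = crash_exclude_mem_range(mem, crashk_cma_ranges[i].start,
                                                  crashk_cma_ranges[i].end);
                if (ret)
                        return ret;
        }

The callers would still have to account for crashk_cma_cnt when sizing the
crash_mem they pass in, but that is a one-line change per architecture.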
> +
> +        return 0;
>
> But powerpc [2] is a little different which uses a wrapper for
> crash_exclude_mem_range() and more check and realloc_mem_ranges().
>
> +        for (i = 0; i < crashk_cma_cnt; ++i) {
> +                ret = crash_exclude_mem_range_guarded(mem_ranges, crashk_cma_ranges[i].start,
> +                                                      crashk_cma_ranges[i].end);
> +                if (ret)
> +                        goto out;
> +        }
As for powerpc, crash_exclude_mem_range_guarded() could only check whether
mem_ranges is large enough and reallocate it, and then the actual exclusion
in crash_prepare_elf64_headers() should work there as well.
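Roughly like this on the powerpc side (again just a sketch, not tested;
grow_mem_ranges() is a hypothetical stand-in for the realloc_mem_ranges()
plumbing powerpc already has):

        /*
         * Only guarantee there is room for the CMA exclusions here; the
         * exclusion itself would move into crash_prepare_elf64_headers().
         * Each exclusion can split at most one range, so nr_ranges +
         * crashk_cma_cnt entries are enough.
         */
        while (mem_rngs->nr_ranges + crashk_cma_cnt > mem_rngs->max_nr_ranges) {
                ret = grow_mem_ranges(&mem_rngs);       /* hypothetical helper */
                if (ret)
                        goto out;
        }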
> [1]: https://lore.kernel.org/all/ZWEAWMJtesa3O9M5@dwarf.suse.cz/
> [2]:
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b4a96ab50f368afc2360ff539a20254ca2c9a889
--
Sincerely yours,
Mike.
On Mon, 26 Jan 2026, at 09:13, Jinjie Ruan wrote:
> Commit 35c18f2933c5 ("Add a new optional ",cma" suffix to the
> crashkernel= command line option") and commit ab475510e042 ("kdump:
> implement reserve_crashkernel_cma") added CMA support for kdump
> crashkernel reservation.
>
> Crash kernel memory reservation wastes production resources if too
> large, risks kdump failure if too small, and faces allocation difficulties
> on fragmented systems due to contiguous block constraints. The new
> CMA-based crashkernel reservation scheme splits the "large fixed
> reservation" into a "small fixed region + large CMA dynamic region": the
> CMA memory is available to userspace during normal operation to avoid
> waste, and is reclaimed for kdump upon crash—saving memory while
> improving reliability.
>
> So extend crashkernel CMA reservation support to arm64. The following
> changes are made to enable CMA reservation:
>
> - Parse and obtain the CMA reservation size along with other crashkernel
> parameters.
> - Call reserve_crashkernel_cma() to allocate the CMA region for kdump.
> - Include the CMA-reserved ranges for kdump kernel to use.
> - Exclude the CMA-reserved ranges from the crash kernel memory to
> prevent them from being exported through /proc/vmcore.
>
> Update kernel-parameters.txt to document CMA support for crashkernel on
> arm64 architecture.
>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> ---
> v2:
> - Free cmem in prepare_elf_headers()
> - Add the mtivation.
> ---
> Documentation/admin-guide/kernel-parameters.txt | 2 +-
> arch/arm64/kernel/machine_kexec_file.c | 15 ++++++++++++++-
> arch/arm64/mm/init.c | 5 +++--
> 3 files changed, 18 insertions(+), 4 deletions(-)
>
Thanks for respinning the commit log.
I am not an expert but this looks reasonable to me, so
Acked-by: Ard Biesheuvel <ardb@kernel.org>
> diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
> index 1058f2a6d6a8..36bb642a7edd 100644
> --- a/Documentation/admin-guide/kernel-parameters.txt
> +++ b/Documentation/admin-guide/kernel-parameters.txt
> @@ -1119,7 +1119,7 @@ Kernel parameters
> It will be ignored when crashkernel=X,high is not used
> or memory reserved is below 4G.
> crashkernel=size[KMG],cma
> - [KNL, X86, ppc] Reserve additional crash kernel memory from
> + [KNL, X86, ARM64, ppc] Reserve additional crash kernel memory from
> CMA. This reservation is usable by the first system's
> userspace memory and kernel movable allocations (memory
> balloon, zswap). Pages allocated from this memory range
> diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
> index 410060ebd86d..ef6ce9aaba80 100644
> --- a/arch/arm64/kernel/machine_kexec_file.c
> +++ b/arch/arm64/kernel/machine_kexec_file.c
> @@ -48,7 +48,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> u64 i;
> phys_addr_t start, end;
>
> - nr_ranges = 2; /* for exclusion of crashkernel region */
> + nr_ranges = 2 + crashk_cma_cnt; /* for exclusion of crashkernel region */
> for_each_mem_range(i, &start, &end)
> nr_ranges++;
>
> @@ -64,6 +64,12 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> cmem->nr_ranges++;
> }
>
> + for (i = 0; i < crashk_cma_cnt; i++) {
> + cmem->ranges[cmem->nr_ranges].start = crashk_cma_ranges[i].start;
> + cmem->ranges[cmem->nr_ranges].end = crashk_cma_ranges[i].end;
> + cmem->nr_ranges++;
> + }
> +
> /* Exclude crashkernel region */
> ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
> if (ret)
> @@ -75,6 +81,13 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
> goto out;
> }
>
> + for (i = 0; i < crashk_cma_cnt; ++i) {
> + ret = crash_exclude_mem_range(cmem, crashk_cma_ranges[i].start,
> + crashk_cma_ranges[i].end);
> + if (ret)
> + goto out;
> + }
> +
> ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
>
> out:
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 524d34a0e921..28165d94af08 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -96,8 +96,8 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
>
> static void __init arch_reserve_crashkernel(void)
> {
> + unsigned long long crash_base, crash_size, cma_size = 0;
> unsigned long long low_size = 0;
> - unsigned long long crash_base, crash_size;
> bool high = false;
> int ret;
>
> @@ -106,11 +106,12 @@ static void __init arch_reserve_crashkernel(void)
>
> ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
> &crash_size, &crash_base,
> - &low_size, NULL, &high);
> + &low_size, &cma_size, &high);
> if (ret)
> return;
>
> reserve_crashkernel_generic(crash_size, crash_base, low_size, high);
> + reserve_crashkernel_cma(cma_size);
> }
>
> static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
> --
> 2.34.1