If CONFIG_ARCH_KEEP_MEMBLOCK is enabled, the memory information in
memblock is retained after boot. Since we release the __init memory
here, we should also remove the corresponding region from
memblock.reserved.
Signed-off-by: Rong Qianfeng <rongqianfeng@vivo.com>
---
arch/arm64/mm/init.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bfb10969cbf0..99cfa217e905 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -402,6 +402,13 @@ void __init mem_init(void)

void free_initmem(void)
{
+ if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
+ unsigned long aligned_begin = ALIGN_DOWN((u64)__init_begin, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN((u64)__init_end, PAGE_SIZE);
+
+ memblock_free((void *)aligned_begin, aligned_end - aligned_begin);
+ }
+
free_reserved_area(lm_alias(__init_begin),
lm_alias(__init_end),
POISON_FREE_INITMEM, "unused kernel");
--
2.39.0
Hi Rong,
On Fri, 30 Aug 2024 at 10:00, Rong Qianfeng <rongqianfeng@vivo.com> wrote:
>
> If CONFIG_ARCH_KEEP_MEMBLOCK is enabled, the memory information in
> memblock is retained after boot. Since we release the __init memory
> here, we should also remove the corresponding region from
> memblock.reserved.
>
> Signed-off-by: Rong Qianfeng <rongqianfeng@vivo.com>
> ---
> arch/arm64/mm/init.c | 7 +++++++
> 1 file changed, 7 insertions(+)
>
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index bfb10969cbf0..99cfa217e905 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -402,6 +402,13 @@ void __init mem_init(void)
>
> void free_initmem(void)
> {
> + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
This is always true on arm64
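(arch/arm64/Kconfig selects it unconditionally:

    config ARM64
            ...
            select ARCH_KEEP_MEMBLOCK

so the IS_ENABLED() check compiles to always-true here.)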
> + unsigned long aligned_begin = ALIGN_DOWN((u64)__init_begin, PAGE_SIZE);
> + unsigned long aligned_end = ALIGN((u64)__init_end, PAGE_SIZE);
> +
> + memblock_free((void *)aligned_begin, aligned_end - aligned_begin);
> + }
> +
What does this achieve? The memory is already being reused by the page
allocator (due to free_reserved_area()), and the memblock allocator is
no longer usable at this point anyway.
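To illustrate, free_reserved_area() already walks the range page by
page and hands each page straight to the buddy allocator; a paraphrased
sketch (not the exact mm/page_alloc.c source):

    for (pos = start; pos < end; pos += PAGE_SIZE) {
            struct page *page = virt_to_page(pos);

            /* optionally poison the freed memory first */
            if (poison >= 0)
                    memset(pos, poison, PAGE_SIZE);
            /* clears PG_reserved, adjusts the managed page count,
             * then __free_page()s it into the buddy allocator */
            free_reserved_page(page);
    }

memblock's bookkeeping plays no part in any of this.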
> free_reserved_area(lm_alias(__init_begin),
> lm_alias(__init_end),
> POISON_FREE_INITMEM, "unused kernel");
On 2024/8/30 16:14, Ard Biesheuvel wrote:
> Hi Rong,
>
> On Fri, 30 Aug 2024 at 10:00, Rong Qianfeng <rongqianfeng@vivo.com> wrote:
>> If CONFIG_ARCH_KEEP_MEMBLOCK is enabled, the memory information in
>> memblock is retained after boot. Since we release the __init memory
>> here, we should also remove the corresponding region from
>> memblock.reserved.
>>
>> Signed-off-by: Rong Qianfeng <rongqianfeng@vivo.com>
>> ---
>> arch/arm64/mm/init.c | 7 +++++++
>> 1 file changed, 7 insertions(+)
>>
>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
>> index bfb10969cbf0..99cfa217e905 100644
>> --- a/arch/arm64/mm/init.c
>> +++ b/arch/arm64/mm/init.c
>> @@ -402,6 +402,13 @@ void __init mem_init(void)
>>
>> void free_initmem(void)
>> {
>> + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
> This is always true on arm64
>
>> + unsigned long aligned_begin = ALIGN_DOWN((u64)__init_begin, PAGE_SIZE);
>> + unsigned long aligned_end = ALIGN((u64)__init_end, PAGE_SIZE);
>> +
>> + memblock_free((void *)aligned_begin, aligned_end - aligned_begin);
>> + }
>> +
> What does this achieve? The memory is already being reused by the page
> allocator (due to free_reserved_area()), and the memblock allocator is
> no longer usable at this point anyway.
If we don't remove the region, stale information remains visible in
/sys/kernel/debug/memblock/reserved: the freed __init range still shows
up as reserved even though its pages have gone back to the page
allocator.
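For reference, that debugfs file simply dumps whatever is left in the
retained region array; a paraphrased sketch of memblock_debug_show()
from mm/memblock.c (not the exact source):

    static int memblock_debug_show(struct seq_file *m, void *private)
    {
            struct memblock_type *type = m->private;
            struct memblock_region *reg;
            phys_addr_t end;
            int i;

            for (i = 0; i < type->cnt; i++) {
                    reg = &type->regions[i];
                    end = reg->base + reg->size - 1;

                    /* one line per retained region, including any stale
                     * __init range still sitting in memblock.reserved */
                    seq_printf(m, "%4d: %pa..%pa\n", i, &reg->base, &end);
            }
            return 0;
    }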
Best Regards,
Qianfeng
>
>> free_reserved_area(lm_alias(__init_begin),
>> lm_alias(__init_end),
>> POISON_FREE_INITMEM, "unused kernel");