arch/x86/mm/init.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-)
At least with CONFIG_PHYSICAL_START=0x100000, if there is < 4 MiB of contiguous
free memory available at this point, the kernel will crash and burn because
memblock_phys_alloc_range returns 0 on failure, which leads memblock_phys_free
to throw the first 4 MiB of physical memory to the wolves. At a minimum it
should fail gracefully with a meaningful diagnostic, but in fact everything
seems to work fine without the weird reserve allocation.
---
arch/x86/mm/init.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eb503f5..3696770 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -640,8 +640,13 @@ static void __init memory_map_top_down(unsigned long map_start,
*/
addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
map_end);
- memblock_phys_free(addr, PMD_SIZE);
- real_end = addr + PMD_SIZE;
+ if (unlikely(addr < map_start)) {
+ pr_warn("Failed to release memory for alloc_low_pages()");
+ real_end = ALIGN_DOWN(map_end, PMD_SIZE);
+ } else {
+ memblock_phys_free(addr, PMD_SIZE);
+ real_end = addr + PMD_SIZE;
+ }
	/* step_size need to be small so pgt_buf from BRK could cover it */
step_size = PMD_SIZE;
--
2.34.0
* gldrk <me@rarity.fan> wrote:

> At least with CONFIG_PHYSICAL_START=0x100000, if there is < 4 MiB of contiguous
> free memory available at this point, the kernel will crash and burn because
> memblock_phys_alloc_range returns 0 on failure, which leads memblock_phys_free
> to throw the first 4 MiB of physical memory to the wolves. At a minimum it
> should fail gracefully with a meaningful diagnostic, but in fact everything
> seems to work fine without the weird reserve allocation.
>
> ---
>  arch/x86/mm/init.c | 9 +++++++--
>  1 file changed, 7 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
> index eb503f5..3696770 100644
> --- a/arch/x86/mm/init.c
> +++ b/arch/x86/mm/init.c
> @@ -640,8 +640,13 @@ static void __init memory_map_top_down(unsigned long map_start,
>  	 */
>  	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
>  					 map_end);
> -	memblock_phys_free(addr, PMD_SIZE);
> -	real_end = addr + PMD_SIZE;
> +	if (unlikely(addr < map_start)) {
> +		pr_warn("Failed to release memory for alloc_low_pages()");
> +		real_end = ALIGN_DOWN(map_end, PMD_SIZE);
> +	} else {
> +		memblock_phys_free(addr, PMD_SIZE);
> +		real_end = addr + PMD_SIZE;
> +	}

Makes sense to fix this bug I suppose, but the usual error check pattern
for memblock_phys_alloc_range() failure would not be 'addr < map_start'
but !addr.

( If memblock_phys_alloc_range() succeeds but returns an address below
  'map_start', that would be a different failure I guess. )

Also, no need to add the 'unlikely()' I suspect - this is early boot
code, micro-performance of branching is immaterial.

Just curious: what type of system has < 4 MiB of contiguous free memory
available in early boot? Or was it something intentionally constrained
via qemu?

Thanks,

	Ingo
> Just curious: what type of system has < 4 MiB of contiguous free memory
> available in early boot? Or was it something intentionally constrained
> via qemu?

Yes, I was able to boot a basic Alpine system off virtiofs with a few
MiB total and a stripped-down config. I happen to have a memory-starved
486 machine that is technically "supported", but it's not running Linux
just yet.

Here's an updated patch.

Signed-off-by: Philip Redkin <me@rarity.fan>
---
 arch/x86/mm/init.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index eb503f5..6c4ec4f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -640,8 +640,13 @@ static void __init memory_map_top_down(unsigned long map_start,
 	 */
 	addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
 					 map_end);
-	memblock_phys_free(addr, PMD_SIZE);
-	real_end = addr + PMD_SIZE;
+	if (!addr) {
+		pr_warn("Failed to release memory for alloc_low_pages()");
+		real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
+	} else {
+		memblock_phys_free(addr, PMD_SIZE);
+		real_end = addr + PMD_SIZE;
+	}
 	/* step_size need to be small so pgt_buf from BRK could cover it */
 	step_size = PMD_SIZE;
--
2.34.0
© 2016 - 2024 Red Hat, Inc.