From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
When CONFIG_HIGHMEM is enabled, __cma_declare_contiguous_nid() first
tries to allocate the area from HIGHMEM and if that fails it falls back
to allocation from low memory.
Split allocation from HIGHMEM into a helper function to further decouple
logic related to CONFIG_HIGHMEM.
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
---
mm/cma.c | 52 +++++++++++++++++++++++++++++-----------------------
1 file changed, 29 insertions(+), 23 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 1df8ff312d99..0a24c46f3296 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -376,6 +376,30 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
return 0;
}
+static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
+ phys_addr_t align, phys_addr_t *limit, int nid)
+{
+ phys_addr_t addr = 0;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ phys_addr_t highmem = __pa(high_memory - 1) + 1;
+
+ /*
+ * All pages in the reserved area must come from the same zone.
+ * If the requested region crosses the low/high memory boundary,
+ * try allocating from high memory first and fall back to low
+ * memory in case of failure.
+ */
+ if (base < highmem && *limit > highmem) {
+ addr = memblock_alloc_range_nid(size, align, highmem,
+ *limit, nid, true);
+ *limit = highmem;
+ }
+ }
+
+ return addr;
+}
+
static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
@@ -383,19 +407,9 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
int nid)
{
phys_addr_t memblock_end = memblock_end_of_DRAM();
- phys_addr_t highmem_start, base = *basep;
+ phys_addr_t base = *basep;
int ret;
- /*
- * We can't use __pa(high_memory) directly, since high_memory
- * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
- * complain. Find the boundary by adding one to the last valid
- * address.
- */
- if (IS_ENABLED(CONFIG_HIGHMEM))
- highmem_start = __pa(high_memory - 1) + 1;
- else
- highmem_start = memblock_end_of_DRAM();
pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
__func__, &size, &base, &limit, &alignment);
@@ -472,18 +486,10 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
}
#endif
- /*
- * All pages in the reserved area must come from the same zone.
- * If the requested region crosses the low/high memory boundary,
- * try allocating from high memory first and fall back to low
- * memory in case of failure.
- */
- if (!addr && base < highmem_start && limit > highmem_start) {
- addr = memblock_alloc_range_nid(size, alignment,
- highmem_start, limit, nid, true);
- limit = highmem_start;
- }
-
+ /* On systems with HIGHMEM try allocating from there first */
+ if (!addr)
+ addr = cma_alloc_highmem(base, size, alignment, &limit,
+ nid);
if (!addr) {
addr = memblock_alloc_range_nid(size, alignment, base,
limit, nid, true);
--
2.47.2
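(For illustration only: the addresses below describe a hypothetical 32-bit
layout and are not taken from the patch; they just show how the boundary
computed by cma_alloc_highmem() is used.)

	/*
	 * Assume the direct map covers physical 0x00000000 - 0x2fffffff, so
	 * high_memory is the first virtual address past the direct map.
	 * __pa(high_memory) is not a valid direct-map translation (DEBUG_VIRTUAL
	 * would rightly complain), hence "last valid address plus one":
	 */
	phys_addr_t highmem = __pa(high_memory - 1) + 1;	/* 0x30000000 here */

	/*
	 * A request with base = 0x20000000 and limit = 0x40000000 crosses the
	 * boundary: cma_alloc_highmem() first tries [0x30000000, 0x40000000)
	 * and clamps *limit to 0x30000000, so that on failure the caller falls
	 * back to [0x20000000, 0x30000000), entirely in low memory.
	 */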
On Wed, Jul 02, 2025 at 08:36:05PM +0300, Mike Rapoport wrote:
> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>
> When CONFIG_HIGHMEM is enabled, __cma_declare_contiguous_nid() first
> tries to allocate the area from HIGHMEM and if that fails it falls back
> to allocation from low memory.
>
> Split allocation from HIGHMEM into a helper function to further decouple
> logic related to CONFIG_HIGHMEM.
>
> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> ---
> mm/cma.c | 52 +++++++++++++++++++++++++++++-----------------------
> 1 file changed, 29 insertions(+), 23 deletions(-)
>
> diff --git a/mm/cma.c b/mm/cma.c
> index 1df8ff312d99..0a24c46f3296 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -376,6 +376,30 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
> return 0;
> }
>
> +static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
> + phys_addr_t align, phys_addr_t *limit, int nid)
> +{
> + phys_addr_t addr = 0;
> +
> + if (IS_ENABLED(CONFIG_HIGHMEM)) {
> + phys_addr_t highmem = __pa(high_memory - 1) + 1;
> +
> + /*
> + * All pages in the reserved area must come from the same zone.
> + * If the requested region crosses the low/high memory boundary,
> + * try allocating from high memory first and fall back to low
> + * memory in case of failure.
> + */
> + if (base < highmem && *limit > highmem) {
> + addr = memblock_alloc_range_nid(size, align, highmem,
> + *limit, nid, true);
> + *limit = highmem;
> + }
> + }
Not a big deal, but maybe better to do it in one function? Maybe even move
the CONFIG_PHYS_ADDR_T_64BIT block in there as well? So memblock_alloc_range_nid()
calls would be contained in one place and the X86_64/HIGHMEM comments as
well.
Just a thought.
diff --git a/mm/cma.c b/mm/cma.c
index dd7643fc01db..532b56e6971a 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -377,11 +377,12 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
return 0;
}
-static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
- phys_addr_t align, phys_addr_t *limit, int nid)
+static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
+ phys_addr_t align, phys_addr_t limit, int nid)
{
phys_addr_t addr = 0;
+ /* On systems with HIGHMEM try allocating from there first */
if (IS_ENABLED(CONFIG_HIGHMEM)) {
phys_addr_t highmem = __pa(high_memory - 1) + 1;
@@ -393,11 +394,15 @@ static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
*/
-		if (base < highmem && *limit > highmem) {
+		if (base < highmem && limit > highmem) {
addr = memblock_alloc_range_nid(size, align, highmem,
- *limit, nid, true);
+ limit, nid, true);
-			*limit = highmem;
+			limit = highmem;
}
}
+ if (!addr)
+		addr = memblock_alloc_range_nid(size, align, base,
+ limit, nid, true);
+
return addr;
}
@@ -487,16 +492,10 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
}
#endif
- /* On systems with HIGHMEM try allocating from there first */
if (!addr)
- addr = cma_alloc_highmem(base, size, alignment, &limit,
- nid);
- if (!addr) {
- addr = memblock_alloc_range_nid(size, alignment, base,
- limit, nid, true);
- if (!addr)
- return -ENOMEM;
- }
+		addr = cma_alloc_mem(base, size, alignment, limit, nid);
+	if (!addr)
+		return -ENOMEM;
/*
* kmemleak scans/reads tracked objects for pointers to other
--
Oscar Salvador
SUSE Labs
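For reference, a rough and untested sketch of what such a consolidated helper
could look like. It assumes the bottom-up attempt above 4G that is currently
guarded by CONFIG_PHYS_ADDR_T_64BIT in __cma_declare_contiguous_nid() is moved
in unchanged, and that the limit is passed by value; it only illustrates the
suggestion, not the actual v2:

static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
			phys_addr_t align, phys_addr_t limit, int nid)
{
	phys_addr_t addr = 0;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/*
	 * Assumed to mirror the existing block in __cma_declare_contiguous_nid():
	 * try a bottom-up allocation above 4G first so the CMA area lands near
	 * the start of the node and stays out of constrained zones (DMA/DMA32).
	 */
	if (!memblock_bottom_up() && memblock_end_of_DRAM() >= SZ_4G + size) {
		memblock_set_bottom_up(true);
		addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
						nid, true);
		memblock_set_bottom_up(false);
	}
#endif

	/* On systems with HIGHMEM try allocating from there next */
	if (!addr && IS_ENABLED(CONFIG_HIGHMEM)) {
		phys_addr_t highmem = __pa(high_memory - 1) + 1;

		/*
		 * All pages in the reserved area must come from the same zone,
		 * so if the request crosses the low/high memory boundary, try
		 * high memory first and cap the fallback range at the boundary.
		 */
		if (base < highmem && limit > highmem) {
			addr = memblock_alloc_range_nid(size, align, highmem,
							limit, nid, true);
			limit = highmem;
		}
	}

	/* Finally fall back to the remaining (low memory) range */
	if (!addr)
		addr = memblock_alloc_range_nid(size, align, base, limit,
						nid, true);

	return addr;
}

The caller in __cma_declare_contiguous_nid() would then reduce to roughly:

	addr = cma_alloc_mem(base, size, alignment, limit, nid);
	if (!addr)
		return -ENOMEM;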
On Thu, Jul 03, 2025 at 11:53:06AM +0200, Oscar Salvador wrote:
> On Wed, Jul 02, 2025 at 08:36:05PM +0300, Mike Rapoport wrote:
> > From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
> >
> > When CONFIG_HIGHMEM is enabled, __cma_declare_contiguous_nid() first
> > tries to allocate the area from HIGHMEM and if that fails it falls back
> > to allocation from low memory.
> >
> > Split allocation from HIGHMEM into a helper function to further decouple
> > logic related to CONFIG_HIGHMEM.
> >
> > Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
> > ---
> > mm/cma.c | 52 +++++++++++++++++++++++++++++-----------------------
> > 1 file changed, 29 insertions(+), 23 deletions(-)
> >
> > diff --git a/mm/cma.c b/mm/cma.c
> > index 1df8ff312d99..0a24c46f3296 100644
> > --- a/mm/cma.c
> > +++ b/mm/cma.c
> > @@ -376,6 +376,30 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
> > return 0;
> > }
> >
> > +static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
> > + phys_addr_t align, phys_addr_t *limit, int nid)
> > +{
> > + phys_addr_t addr = 0;
> > +
> > + if (IS_ENABLED(CONFIG_HIGHMEM)) {
> > + phys_addr_t highmem = __pa(high_memory - 1) + 1;
> > +
> > + /*
> > + * All pages in the reserved area must come from the same zone.
> > + * If the requested region crosses the low/high memory boundary,
> > + * try allocating from high memory first and fall back to low
> > + * memory in case of failure.
> > + */
> > + if (base < highmem && *limit > highmem) {
> > + addr = memblock_alloc_range_nid(size, align, highmem,
> > + *limit, nid, true);
> > + *limit = highmem;
> > + }
> > + }
>
> Not a big deal, but maybe better to do it in one function? Maybe even move
> the CONFIG_PHYS_ADDR_T_64BIT block in there as well? So memblock_alloc_range_nid()
> calls would be contained in one place and the X86_64/HIGHMEM comments as
> well.
> Just a thought.
Yeah, this will be neater, thanks!
Will send v2 shortly.
> --
> Oscar Salvador
> SUSE Labs
--
Sincerely yours,
Mike.
On 03.07.25 11:53, Oscar Salvador wrote:
> On Wed, Jul 02, 2025 at 08:36:05PM +0300, Mike Rapoport wrote:
>> From: "Mike Rapoport (Microsoft)" <rppt@kernel.org>
>>
>> When CONFIG_HIGHMEM is enabled, __cma_declare_contiguous_nid() first
>> tries to allocate the area from HIGHMEM and if that fails it falls back
>> to allocation from low memory.
>>
>> Split allocation from HIGHMEM into a helper function to further decouple
>> logic related to CONFIG_HIGHMEM.
>>
>> Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
>> ---
>> mm/cma.c | 52 +++++++++++++++++++++++++++++-----------------------
>> 1 file changed, 29 insertions(+), 23 deletions(-)
>>
>> diff --git a/mm/cma.c b/mm/cma.c
>> index 1df8ff312d99..0a24c46f3296 100644
>> --- a/mm/cma.c
>> +++ b/mm/cma.c
>> @@ -376,6 +376,30 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
>> return 0;
>> }
>>
>> +static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t size,
>> + phys_addr_t align, phys_addr_t *limit, int nid)
>> +{
>> + phys_addr_t addr = 0;
>> +
>> + if (IS_ENABLED(CONFIG_HIGHMEM)) {
>> + phys_addr_t highmem = __pa(high_memory - 1) + 1;
>> +
>> + /*
>> + * All pages in the reserved area must come from the same zone.
>> + * If the requested region crosses the low/high memory boundary,
>> + * try allocating from high memory first and fall back to low
>> + * memory in case of failure.
>> + */
>> + if (base < highmem && *limit > highmem) {
>> + addr = memblock_alloc_range_nid(size, align, highmem,
>> + *limit, nid, true);
>> + *limit = highmem;
>> + }
>> + }
>
> Not a big deal, but maybe better to do it in one function?
Yes, same thought here.
--
Cheers,
David / dhildenb