From nobody Wed Oct 8 03:44:53 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4236A303DC4 for ; Wed, 2 Jul 2025 17:36:14 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477777; cv=none; b=s40pQnMmuaeXaAlRw0Ogmw4VqQp+o52fmUJDMnhsP3hUsCfKLBkOYOOhER5MoxynTkKsubpBUeFszYecqTjU8bA/jnPAEsZEUeQzibxT41jtFhAGzKNvkjsHXYI9AqGzrfNpw/RnbZkRtFI/F01Uqta0gcI7fHxpwrIWkV9IEGg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477777; c=relaxed/simple; bh=IcZVDD4PqCPvRH9Tv0Yzxexs8Xfs5CVXiVFXXhDy2C0=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=MLrYgvUBW/NdLV15TT+Rv0BdDTEH51ZjGE2MwqpDhEVpXdX8TW+NRX1ltnyBWBKrhqasuZGg0GkRvchjG0Bt3rkHLFdZTKezJAwOhpWV+UE1GQNQ3wiyvWlbziArCooxKJs5IqeYbO0JfZ5VPZyf1nmM0cVMrxtTBIYg+r9HCc8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=Dm8kvj85; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="Dm8kvj85" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 96EF6C4CEEE; Wed, 2 Jul 2025 17:36:12 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1751477774; bh=IcZVDD4PqCPvRH9Tv0Yzxexs8Xfs5CVXiVFXXhDy2C0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=Dm8kvj85eAA+xP+KHiFlmlOZjR+bqAodYmMkoKUruVf0dTyLyoFRVu+cK07+JJ6V7 r9EyXym8DbdsIKUBsKDR3socBoBq68tLhycyV0HHoRJ4f+8TeXdmrSmk/00RA+gFHE T66U26mL2KxDF6GMMulH0K/G25NsJH51UE8XR7xHoPMLRmAvSSseDiW2Fg0n1P7s48 ytBiaUtGqlF8QtVFGcmhqFwInk9GtyT3Rdy447ZpHr+AUsozd7LVz9rI3Bp96FGX60 
K7oPpxKGnuwgUVpBwzr+fnFx5nMihuFhjA4QuElNM4texmWvL1+Z/wgxAkt0KGqXVK rNLSMKB4f80sA== From: Mike Rapoport To: Andrew Morton Cc: Alexandre Ghiti , David Hildenbrand , Mike Rapoport , Oscar Salvador , Pratyush Yadav , linux-kernel@vger.kernel.org, linux-mm@kvack.org Subject: [PATCH 1/3] cma: move __cma_declare_contiguous_nid() before its usage Date: Wed, 2 Jul 2025 20:36:03 +0300 Message-ID: <20250702173605.2198924-2-rppt@kernel.org> X-Mailer: git-send-email 2.47.2 In-Reply-To: <20250702173605.2198924-1-rppt@kernel.org> References: <20250702173605.2198924-1-rppt@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Mike Rapoport (Microsoft)" and kill static declaration Signed-off-by: Mike Rapoport (Microsoft) Acked-by: David Hildenbrand Acked-by: Oscar Salvador --- mm/cma.c | 294 +++++++++++++++++++++++++++---------------------------- 1 file changed, 144 insertions(+), 150 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 397567883a10..9bf95f8f0f33 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -35,12 +35,6 @@ struct cma cma_areas[MAX_CMA_AREAS]; unsigned int cma_area_count; =20 -static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, - phys_addr_t size, phys_addr_t limit, - phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma, - int nid); - phys_addr_t cma_get_base(const struct cma *cma) { WARN_ON_ONCE(cma->nranges !=3D 1); @@ -358,6 +352,150 @@ static void __init list_insert_sorted( } } =20 +static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + bool fixed, const char *name, struct cma **res_cma, + int nid) +{ + phys_addr_t memblock_end =3D memblock_end_of_DRAM(); + phys_addr_t highmem_start, base =3D *basep; + int ret; + + /* + * We 
can't use __pa(high_memory) directly, since high_memory + * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) + * complain. Find the boundary by adding one to the last valid + * address. + */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + highmem_start =3D __pa(high_memory - 1) + 1; + else + highmem_start =3D memblock_end_of_DRAM(); + pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", + __func__, &size, &base, &limit, &alignment); + + if (cma_area_count =3D=3D ARRAY_SIZE(cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size) + return -EINVAL; + + if (alignment && !is_power_of_2(alignment)) + return -EINVAL; + + if (!IS_ENABLED(CONFIG_NUMA)) + nid =3D NUMA_NO_NODE; + + /* Sanitise input arguments. */ + alignment =3D max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); + if (fixed && base & (alignment - 1)) { + pr_err("Region at %pa must be aligned to %pa bytes\n", + &base, &alignment); + return -EINVAL; + } + base =3D ALIGN(base, alignment); + size =3D ALIGN(size, alignment); + limit &=3D ~(alignment - 1); + + if (!base) + fixed =3D false; + + /* size should be aligned with order_per_bit */ + if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + /* + * If allocating at a fixed base the request region must not cross the + * low/high memory boundary. + */ + if (fixed && base < highmem_start && base + size > highmem_start) { + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); + return -EINVAL; + } + + /* + * If the limit is unspecified or above the memblock end, its effective + * value will be the memblock end. Set it explicitly to simplify further + * checks. 
+ */ + if (limit =3D=3D 0 || limit > memblock_end) + limit =3D memblock_end; + + if (base + size > limit) { + pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", + &size, &base, &limit); + return -EINVAL; + } + + /* Reserve memory */ + if (fixed) { + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + return -EBUSY; + } + } else { + phys_addr_t addr =3D 0; + + /* + * If there is enough memory, try a bottom-up allocation first. + * It will place the new cma area close to the start of the node + * and guarantee that the compaction is moving pages out of the + * cma area and not into it. + * Avoid using first 4GB to not interfere with constrained zones + * like DMA/DMA32. + */ +#ifdef CONFIG_PHYS_ADDR_T_64BIT + if (!memblock_bottom_up() && memblock_end >=3D SZ_4G + size) { + memblock_set_bottom_up(true); + addr =3D memblock_alloc_range_nid(size, alignment, SZ_4G, + limit, nid, true); + memblock_set_bottom_up(false); + } +#endif + + /* + * All pages in the reserved area must come from the same zone. + * If the requested region crosses the low/high memory boundary, + * try allocating from high memory first and fall back to low + * memory in case of failure. + */ + if (!addr && base < highmem_start && limit > highmem_start) { + addr =3D memblock_alloc_range_nid(size, alignment, + highmem_start, limit, nid, true); + limit =3D highmem_start; + } + + if (!addr) { + addr =3D memblock_alloc_range_nid(size, alignment, base, + limit, nid, true); + if (!addr) + return -ENOMEM; + } + + /* + * kmemleak scans/reads tracked objects for pointers to other + * objects but this address isn't mapped and accessible + */ + kmemleak_ignore_phys(addr); + base =3D addr; + } + + ret =3D cma_init_reserved_mem(base, size, order_per_bit, name, res_cma); + if (ret) { + memblock_phys_free(base, size); + return ret; + } + + (*res_cma)->nid =3D nid; + *basep =3D base; + + return 0; +} + /* * Create CMA areas with a total size of @total_size. 
A normal allocation * for one area is tried first. If that fails, the biggest memblock @@ -593,150 +731,6 @@ int __init cma_declare_contiguous_nid(phys_addr_t bas= e, return ret; } =20 -static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, - phys_addr_t size, phys_addr_t limit, - phys_addr_t alignment, unsigned int order_per_bit, - bool fixed, const char *name, struct cma **res_cma, - int nid) -{ - phys_addr_t memblock_end =3D memblock_end_of_DRAM(); - phys_addr_t highmem_start, base =3D *basep; - int ret; - - /* - * We can't use __pa(high_memory) directly, since high_memory - * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) - * complain. Find the boundary by adding one to the last valid - * address. - */ - if (IS_ENABLED(CONFIG_HIGHMEM)) - highmem_start =3D __pa(high_memory - 1) + 1; - else - highmem_start =3D memblock_end_of_DRAM(); - pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", - __func__, &size, &base, &limit, &alignment); - - if (cma_area_count =3D=3D ARRAY_SIZE(cma_areas)) { - pr_err("Not enough slots for CMA reserved regions!\n"); - return -ENOSPC; - } - - if (!size) - return -EINVAL; - - if (alignment && !is_power_of_2(alignment)) - return -EINVAL; - - if (!IS_ENABLED(CONFIG_NUMA)) - nid =3D NUMA_NO_NODE; - - /* Sanitise input arguments. */ - alignment =3D max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); - if (fixed && base & (alignment - 1)) { - pr_err("Region at %pa must be aligned to %pa bytes\n", - &base, &alignment); - return -EINVAL; - } - base =3D ALIGN(base, alignment); - size =3D ALIGN(size, alignment); - limit &=3D ~(alignment - 1); - - if (!base) - fixed =3D false; - - /* size should be aligned with order_per_bit */ - if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) - return -EINVAL; - - /* - * If allocating at a fixed base the request region must not cross the - * low/high memory boundary. 
- */ - if (fixed && base < highmem_start && base + size > highmem_start) { - pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", - &base, &highmem_start); - return -EINVAL; - } - - /* - * If the limit is unspecified or above the memblock end, its effective - * value will be the memblock end. Set it explicitly to simplify further - * checks. - */ - if (limit =3D=3D 0 || limit > memblock_end) - limit =3D memblock_end; - - if (base + size > limit) { - pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", - &size, &base, &limit); - return -EINVAL; - } - - /* Reserve memory */ - if (fixed) { - if (memblock_is_region_reserved(base, size) || - memblock_reserve(base, size) < 0) { - return -EBUSY; - } - } else { - phys_addr_t addr =3D 0; - - /* - * If there is enough memory, try a bottom-up allocation first. - * It will place the new cma area close to the start of the node - * and guarantee that the compaction is moving pages out of the - * cma area and not into it. - * Avoid using first 4GB to not interfere with constrained zones - * like DMA/DMA32. - */ -#ifdef CONFIG_PHYS_ADDR_T_64BIT - if (!memblock_bottom_up() && memblock_end >=3D SZ_4G + size) { - memblock_set_bottom_up(true); - addr =3D memblock_alloc_range_nid(size, alignment, SZ_4G, - limit, nid, true); - memblock_set_bottom_up(false); - } -#endif - - /* - * All pages in the reserved area must come from the same zone. - * If the requested region crosses the low/high memory boundary, - * try allocating from high memory first and fall back to low - * memory in case of failure. 
- */ - if (!addr && base < highmem_start && limit > highmem_start) { - addr =3D memblock_alloc_range_nid(size, alignment, - highmem_start, limit, nid, true); - limit =3D highmem_start; - } - - if (!addr) { - addr =3D memblock_alloc_range_nid(size, alignment, base, - limit, nid, true); - if (!addr) - return -ENOMEM; - } - - /* - * kmemleak scans/reads tracked objects for pointers to other - * objects but this address isn't mapped and accessible - */ - kmemleak_ignore_phys(addr); - base =3D addr; - } - - ret =3D cma_init_reserved_mem(base, size, order_per_bit, name, res_cma); - if (ret) { - memblock_phys_free(base, size); - return ret; - } - - (*res_cma)->nid =3D nid; - *basep =3D base; - - return 0; -} - static void cma_debug_show_areas(struct cma *cma) { unsigned long next_zero_bit, next_set_bit, nr_zero; --=20 2.47.2 From nobody Wed Oct 8 03:44:53 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1B552303DD4 for ; Wed, 2 Jul 2025 17:36:17 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477779; cv=none; b=aPIMe186guxnxJ8t0W39+aNfuIJGGPP3YTzJRItuHAzaqvv+N3bUnuscCwxbJLMHXw8d/Tzor5XmHncw1M+vqGw781YdpAGhdDNWDLfaST8JPQP5Iw2zji7wrFFV9MqAMaJhJnCWVmPr0kthAO2CeeoRwq5Ev8HuB/+ynsBB+d0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477779; c=relaxed/simple; bh=EXacDh63KkBz1wR1cI8SHJiNqTMma8mwS1tgtRPH3wA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=AWywZ3cIhbb/SquIoWsMmkogWWy7VkrTbMLTbrJIKCLpTOuk9WPrk002QGpW7+PfjdtrSPtHRDxXpfedfwHOK7HUjuJxu1tHFp7feUFuoF90fAw210NfmSYjiV8ybuAo7z0bLa0lFwzONidkrKLy060gWP3FwrOY+iC33u1nrwo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; 
dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=D7o28+x2; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="D7o28+x2" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 60F5AC4CEF1; Wed, 2 Jul 2025 17:36:15 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1751477777; bh=EXacDh63KkBz1wR1cI8SHJiNqTMma8mwS1tgtRPH3wA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=D7o28+x2/mgxX5nwyTE3ftuQMeHogJzEYes3Ly2UlH8uednwmJKST3ewJ5iH2A1Vp ElfKd5xPUPlSCV83J4L+M7TikEEP1hf2Od29EO2TYF/Lgb4Z7X31knbOAweLKZsytn 8ByZtOgjwAjV1Kd4WyulYkTl9rdwgtMsOBNYnOkaJlnbBS4oCxG0etKLgn/yuscXDf W8onn6+Eggn8QWb83ajII+2fm/ZwSee7WmsUPFt2gg8+NiNljcHzwQaOt7NxI7cUSH EkQ8Sz8NxvC/LK9HmLz5ld857e2BlW3WQWLJj5sYPw7fWgFyAMRXaaqKOU+oZNtgrF ih5OigKav5t5g== From: Mike Rapoport To: Andrew Morton Cc: Alexandre Ghiti , David Hildenbrand , Mike Rapoport , Oscar Salvador , Pratyush Yadav , linux-kernel@vger.kernel.org, linux-mm@kvack.org Subject: [PATCH 2/3] cma: split reservation of fixed area into a helper function Date: Wed, 2 Jul 2025 20:36:04 +0300 Message-ID: <20250702173605.2198924-3-rppt@kernel.org> X-Mailer: git-send-email 2.47.2 In-Reply-To: <20250702173605.2198924-1-rppt@kernel.org> References: <20250702173605.2198924-1-rppt@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Mike Rapoport (Microsoft)" Move the check that verifies that reservation of fixed area does not cross HIGHMEM boundary and the actual memblock_reserve() call into a helper function. This makes code more readable and decouples logic related to CONFIG_HIGHMEM from the core functionality of __cma_declare_contiguous_nid().
Signed-off-by: Mike Rapoport (Microsoft) Acked-by: David Hildenbrand Acked-by: Oscar Salvador --- mm/cma.c | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 9bf95f8f0f33..1df8ff312d99 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -352,6 +352,30 @@ static void __init list_insert_sorted( } } =20 +static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size) +{ + if (IS_ENABLED(CONFIG_HIGHMEM)) { + phys_addr_t highmem_start =3D __pa(high_memory - 1) + 1; + + /* + * If allocating at a fixed base the request region must not + * cross the low/high memory boundary. + */ + if (base < highmem_start && base + size > highmem_start) { + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); + return -EINVAL; + } + } + + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + return -EBUSY; + } + + return 0; +} + static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, @@ -407,15 +431,6 @@ static int __init __cma_declare_contiguous_nid(phys_ad= dr_t *basep, if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) return -EINVAL; =20 - /* - * If allocating at a fixed base the request region must not cross the - * low/high memory boundary. 
- */ - if (fixed && base < highmem_start && base + size > highmem_start) { - pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", - &base, &highmem_start); - return -EINVAL; - } =20 /* * If the limit is unspecified or above the memblock end, its effective @@ -433,10 +448,10 @@ static int __init __cma_declare_contiguous_nid(phys_a= ddr_t *basep, =20 /* Reserve memory */ if (fixed) { - if (memblock_is_region_reserved(base, size) || - memblock_reserve(base, size) < 0) { - return -EBUSY; - } + int err =3D cma_fixed_reserve(base, size); + + if (err) + return err; } else { phys_addr_t addr =3D 0; =20 --=20 2.47.2 From nobody Wed Oct 8 03:44:53 2025 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 83D3B303DF9 for ; Wed, 2 Jul 2025 17:36:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477780; cv=none; b=NvtoNwojat1YxNPt84H+5sX+FXe2IwJq4yeeSfyYf10VLLEJn8iJdSlNfDVk5R5KFF7eUjMdMRLMaTBX0RbdqFo3c/uwCKkdkesXy3GqPYeOMD3SYPLMu5do/+T2pR5dcFK3+OCUZEAcTocTBIPbBFylJiE3YzX64dq8M3dWA+E= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1751477780; c=relaxed/simple; bh=NqKk8ngarG6g3EyeYQKPZPq7rwSgnU5mnxeVmbW10t0=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=VKhWAsFbin3jCRL4eX/yFXfpOFQWD8TxVIBbEyYo4ON2sVIOXMNWxMypXbASluwZXTmIidZvPl/qVc5p6BhwS8OuHS3HnApjXaxm8IBdTg9k5YtA+KezRKOQmJ6W0e5WGbIo7W6qRt2y+BktRcQH/uOv+py6pcL0FZWzzHiUHvE= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=vQleZpGg; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) 
header.d=kernel.org header.i=@kernel.org header.b="vQleZpGg" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 2A365C4CEEE; Wed, 2 Jul 2025 17:36:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1751477780; bh=NqKk8ngarG6g3EyeYQKPZPq7rwSgnU5mnxeVmbW10t0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=vQleZpGgNy3mRTy5dbm9tOQSQllRNd7C7MW0izMjJH9NSe7mbO92fQJ0qQUrNG2R2 2dIWbQ1ryYr6OTjjqOP5P3arqEyF2pAUeb1rHo+D1daavMkQN2y7kXt/V9IBRCneJY XRE7FNsbSaTMWKkw15gykIINHiFYjQ6TEjSLcBUS4TVVcGMjBKE/VnrzrJ5iABejga +4QMO6t9+bbz4kDf9Kqt00H7L3dEOFkyjj4DuZYQsGl6oSHR97tFHEBZLTzCnrBzA+ zKP74NX5aLNUqsRFVdYFODDbnG27smcrw3eGree46NKcn5WyuA+5aAhuK5cFuhMBi+ n4oL+U2o1vUqg== From: Mike Rapoport To: Andrew Morton Cc: Alexandre Ghiti , David Hildenbrand , Mike Rapoport , Oscar Salvador , Pratyush Yadav , linux-kernel@vger.kernel.org, linux-mm@kvack.org Subject: [PATCH 3/3] cma: move allocation from HIGHMEM to a helper function Date: Wed, 2 Jul 2025 20:36:05 +0300 Message-ID: <20250702173605.2198924-4-rppt@kernel.org> X-Mailer: git-send-email 2.47.2 In-Reply-To: <20250702173605.2198924-1-rppt@kernel.org> References: <20250702173605.2198924-1-rppt@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Mike Rapoport (Microsoft)" When CONFIG_HIGHMEM is enabled, __cma_declare_contiguous_nid() first tries to allocate the area from HIGHMEM and if that fails it falls back to allocation from low memory. Split allocation from HIGHMEM into a helper function to further decouple logic related to CONFIG_HIGHMEM.
Signed-off-by: Mike Rapoport (Microsoft) --- mm/cma.c | 52 +++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/mm/cma.c b/mm/cma.c index 1df8ff312d99..0a24c46f3296 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -376,6 +376,30 @@ static int __init cma_fixed_reserve(phys_addr_t base, = phys_addr_t size) return 0; } =20 +static phys_addr_t __init cma_alloc_highmem(phys_addr_t base, phys_addr_t = size, + phys_addr_t align, phys_addr_t *limit, int nid) +{ + phys_addr_t addr =3D 0; + + if (IS_ENABLED(CONFIG_HIGHMEM)) { + phys_addr_t highmem =3D __pa(high_memory - 1) + 1; + + /* + * All pages in the reserved area must come from the same zone. + * If the requested region crosses the low/high memory boundary, + * try allocating from high memory first and fall back to low + * memory in case of failure. + */ + if (base < highmem && *limit > highmem) { + addr =3D memblock_alloc_range_nid(size, align, highmem, + *limit, nid, true); + *limit =3D highmem; + } + } + + return addr; +} + static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, @@ -383,19 +407,9 @@ static int __init __cma_declare_contiguous_nid(phys_ad= dr_t *basep, int nid) { phys_addr_t memblock_end =3D memblock_end_of_DRAM(); - phys_addr_t highmem_start, base =3D *basep; + phys_addr_t base =3D *basep; int ret; =20 - /* - * We can't use __pa(high_memory) directly, since high_memory - * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) - * complain. Find the boundary by adding one to the last valid - * address. 
- */ - if (IS_ENABLED(CONFIG_HIGHMEM)) - highmem_start =3D __pa(high_memory - 1) + 1; - else - highmem_start =3D memblock_end_of_DRAM(); pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); =20 @@ -472,18 +486,10 @@ static int __init __cma_declare_contiguous_nid(phys_a= ddr_t *basep, } #endif =20 - /* - * All pages in the reserved area must come from the same zone. - * If the requested region crosses the low/high memory boundary, - * try allocating from high memory first and fall back to low - * memory in case of failure. - */ - if (!addr && base < highmem_start && limit > highmem_start) { - addr =3D memblock_alloc_range_nid(size, alignment, - highmem_start, limit, nid, true); - limit =3D highmem_start; - } - + /* On systems with HIGHMEM try allocating from there first */ + if (!addr) + addr =3D cma_alloc_highmem(base, size, alignment, &limit, + nid); if (!addr) { addr =3D memblock_alloc_range_nid(size, alignment, base, limit, nid, true); --=20 2.47.2