From: Mike Rapoport
To: Andrew Morton
Cc: Alexandre Ghiti, David Hildenbrand, Mike Rapoport, Oscar Salvador, Pratyush Yadav, linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v2 1/3] cma: move __cma_declare_contiguous_nid() before its usage
Date: Thu, 3 Jul 2025 21:47:09 +0300
Message-ID: <20250703184711.3485940-2-rppt@kernel.org>
In-Reply-To: <20250703184711.3485940-1-rppt@kernel.org>
References: <20250703184711.3485940-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

Move __cma_declare_contiguous_nid() above its first caller and kill the
static forward declaration.

Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: Oscar Salvador
Acked-by: David Hildenbrand
---
 mm/cma.c | 294 +++++++++++++++++++++++++++----------------------------
 1 file changed, 144 insertions(+), 150 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index 397567883a10..9bf95f8f0f33 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -35,12 +35,6 @@
 struct cma cma_areas[MAX_CMA_AREAS];
 unsigned int cma_area_count;
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
-                        phys_addr_t size, phys_addr_t limit,
-                        phys_addr_t alignment, unsigned int order_per_bit,
-                        bool fixed, const char *name, struct cma **res_cma,
-                        int nid);
-
 phys_addr_t cma_get_base(const struct cma *cma)
 {
         WARN_ON_ONCE(cma->nranges != 1);
@@ -358,6 +352,150 @@ static void __init list_insert_sorted(
         }
 }
 
+static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
+                        phys_addr_t size, phys_addr_t limit,
+                        phys_addr_t alignment, unsigned int order_per_bit,
+                        bool fixed, const char *name, struct cma **res_cma,
+                        int nid)
+{
+        phys_addr_t memblock_end = memblock_end_of_DRAM();
+        phys_addr_t highmem_start, base = *basep;
+        int ret;
+
+        /*
+         * We can't use __pa(high_memory) directly, since high_memory
+         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+         * complain. Find the boundary by adding one to the last valid
+         * address.
+         */
+        if (IS_ENABLED(CONFIG_HIGHMEM))
+                highmem_start = __pa(high_memory - 1) + 1;
+        else
+                highmem_start = memblock_end_of_DRAM();
+        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
+                __func__, &size, &base, &limit, &alignment);
+
+        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
+                pr_err("Not enough slots for CMA reserved regions!\n");
+                return -ENOSPC;
+        }
+
+        if (!size)
+                return -EINVAL;
+
+        if (alignment && !is_power_of_2(alignment))
+                return -EINVAL;
+
+        if (!IS_ENABLED(CONFIG_NUMA))
+                nid = NUMA_NO_NODE;
+
+        /* Sanitise input arguments. */
+        alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
+        if (fixed && base & (alignment - 1)) {
+                pr_err("Region at %pa must be aligned to %pa bytes\n",
+                        &base, &alignment);
+                return -EINVAL;
+        }
+        base = ALIGN(base, alignment);
+        size = ALIGN(size, alignment);
+        limit &= ~(alignment - 1);
+
+        if (!base)
+                fixed = false;
+
+        /* size should be aligned with order_per_bit */
+        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
+                return -EINVAL;
+
+        /*
+         * If allocating at a fixed base the request region must not cross the
+         * low/high memory boundary.
+         */
+        if (fixed && base < highmem_start && base + size > highmem_start) {
+                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+                        &base, &highmem_start);
+                return -EINVAL;
+        }
+
+        /*
+         * If the limit is unspecified or above the memblock end, its effective
+         * value will be the memblock end. Set it explicitly to simplify further
+         * checks.
+         */
+        if (limit == 0 || limit > memblock_end)
+                limit = memblock_end;
+
+        if (base + size > limit) {
+                pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+                        &size, &base, &limit);
+                return -EINVAL;
+        }
+
+        /* Reserve memory */
+        if (fixed) {
+                if (memblock_is_region_reserved(base, size) ||
+                    memblock_reserve(base, size) < 0) {
+                        return -EBUSY;
+                }
+        } else {
+                phys_addr_t addr = 0;
+
+                /*
+                 * If there is enough memory, try a bottom-up allocation first.
+                 * It will place the new cma area close to the start of the node
+                 * and guarantee that the compaction is moving pages out of the
+                 * cma area and not into it.
+                 * Avoid using first 4GB to not interfere with constrained zones
+                 * like DMA/DMA32.
+                 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+                if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
+                        memblock_set_bottom_up(true);
+                        addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
+                                        limit, nid, true);
+                        memblock_set_bottom_up(false);
+                }
+#endif
+
+                /*
+                 * All pages in the reserved area must come from the same zone.
+                 * If the requested region crosses the low/high memory boundary,
+                 * try allocating from high memory first and fall back to low
+                 * memory in case of failure.
+                 */
+                if (!addr && base < highmem_start && limit > highmem_start) {
+                        addr = memblock_alloc_range_nid(size, alignment,
+                                        highmem_start, limit, nid, true);
+                        limit = highmem_start;
+                }
+
+                if (!addr) {
+                        addr = memblock_alloc_range_nid(size, alignment, base,
+                                        limit, nid, true);
+                        if (!addr)
+                                return -ENOMEM;
+                }
+
+                /*
+                 * kmemleak scans/reads tracked objects for pointers to other
+                 * objects but this address isn't mapped and accessible
+                 */
+                kmemleak_ignore_phys(addr);
+                base = addr;
+        }
+
+        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
+        if (ret) {
+                memblock_phys_free(base, size);
+                return ret;
+        }
+
+        (*res_cma)->nid = nid;
+        *basep = base;
+
+        return 0;
+}
+
 /*
  * Create CMA areas with a total size of @total_size. A normal allocation
  * for one area is tried first. If that fails, the biggest memblock
@@ -593,150 +731,6 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
         return ret;
 }
 
-static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
-                        phys_addr_t size, phys_addr_t limit,
-                        phys_addr_t alignment, unsigned int order_per_bit,
-                        bool fixed, const char *name, struct cma **res_cma,
-                        int nid)
-{
-        phys_addr_t memblock_end = memblock_end_of_DRAM();
-        phys_addr_t highmem_start, base = *basep;
-        int ret;
-
-        /*
-         * We can't use __pa(high_memory) directly, since high_memory
-         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
-         * complain. Find the boundary by adding one to the last valid
-         * address.
-         */
-        if (IS_ENABLED(CONFIG_HIGHMEM))
-                highmem_start = __pa(high_memory - 1) + 1;
-        else
-                highmem_start = memblock_end_of_DRAM();
-        pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
-                __func__, &size, &base, &limit, &alignment);
-
-        if (cma_area_count == ARRAY_SIZE(cma_areas)) {
-                pr_err("Not enough slots for CMA reserved regions!\n");
-                return -ENOSPC;
-        }
-
-        if (!size)
-                return -EINVAL;
-
-        if (alignment && !is_power_of_2(alignment))
-                return -EINVAL;
-
-        if (!IS_ENABLED(CONFIG_NUMA))
-                nid = NUMA_NO_NODE;
-
-        /* Sanitise input arguments. */
-        alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
-        if (fixed && base & (alignment - 1)) {
-                pr_err("Region at %pa must be aligned to %pa bytes\n",
-                        &base, &alignment);
-                return -EINVAL;
-        }
-        base = ALIGN(base, alignment);
-        size = ALIGN(size, alignment);
-        limit &= ~(alignment - 1);
-
-        if (!base)
-                fixed = false;
-
-        /* size should be aligned with order_per_bit */
-        if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
-                return -EINVAL;
-
-        /*
-         * If allocating at a fixed base the request region must not cross the
-         * low/high memory boundary.
-         */
-        if (fixed && base < highmem_start && base + size > highmem_start) {
-                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
-                        &base, &highmem_start);
-                return -EINVAL;
-        }
-
-        /*
-         * If the limit is unspecified or above the memblock end, its effective
-         * value will be the memblock end. Set it explicitly to simplify further
-         * checks.
-         */
-        if (limit == 0 || limit > memblock_end)
-                limit = memblock_end;
-
-        if (base + size > limit) {
-                pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
-                        &size, &base, &limit);
-                return -EINVAL;
-        }
-
-        /* Reserve memory */
-        if (fixed) {
-                if (memblock_is_region_reserved(base, size) ||
-                    memblock_reserve(base, size) < 0) {
-                        return -EBUSY;
-                }
-        } else {
-                phys_addr_t addr = 0;
-
-                /*
-                 * If there is enough memory, try a bottom-up allocation first.
-                 * It will place the new cma area close to the start of the node
-                 * and guarantee that the compaction is moving pages out of the
-                 * cma area and not into it.
-                 * Avoid using first 4GB to not interfere with constrained zones
-                 * like DMA/DMA32.
-                 */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-                if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
-                        memblock_set_bottom_up(true);
-                        addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
-                                        limit, nid, true);
-                        memblock_set_bottom_up(false);
-                }
-#endif
-
-                /*
-                 * All pages in the reserved area must come from the same zone.
-                 * If the requested region crosses the low/high memory boundary,
-                 * try allocating from high memory first and fall back to low
-                 * memory in case of failure.
-                 */
-                if (!addr && base < highmem_start && limit > highmem_start) {
-                        addr = memblock_alloc_range_nid(size, alignment,
-                                        highmem_start, limit, nid, true);
-                        limit = highmem_start;
-                }
-
-                if (!addr) {
-                        addr = memblock_alloc_range_nid(size, alignment, base,
-                                        limit, nid, true);
-                        if (!addr)
-                                return -ENOMEM;
-                }
-
-                /*
-                 * kmemleak scans/reads tracked objects for pointers to other
-                 * objects but this address isn't mapped and accessible
-                 */
-                kmemleak_ignore_phys(addr);
-                base = addr;
-        }
-
-        ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
-        if (ret) {
-                memblock_phys_free(base, size);
-                return ret;
-        }
-
-        (*res_cma)->nid = nid;
-        *basep = base;
-
-        return 0;
-}
-
 static void cma_debug_show_areas(struct cma *cma)
 {
         unsigned long next_zero_bit, next_set_bit, nr_zero;
-- 
2.47.2
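The change above is purely mechanical. As an aside, a minimal standalone C sketch (hypothetical names, not kernel code) of why moving a static function above its first caller lets the forward declaration go away:

#include <stdio.h>

/* Before the move, the definition lived below main(), so a forward
 * declaration such as "static int twice(int x);" was required here. */
static int twice(int x)         /* definition now precedes its only caller */
{
        return 2 * x;
}

int main(void)
{
        printf("%d\n", twice(21));      /* prints 42 */
        return 0;
}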
From: Mike Rapoport
To: Andrew Morton
Cc: Alexandre Ghiti, David Hildenbrand, Mike Rapoport, Oscar Salvador, Pratyush Yadav, linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v2 2/3] cma: split reservation of fixed area into a helper function
Date: Thu, 3 Jul 2025 21:47:10 +0300
Message-ID: <20250703184711.3485940-3-rppt@kernel.org>
In-Reply-To: <20250703184711.3485940-1-rppt@kernel.org>
References: <20250703184711.3485940-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

Move the check that verifies that reservation of a fixed area does not
cross the HIGHMEM boundary, along with the actual memblock_reserve()
call, into a helper function.

This makes the code more readable and decouples logic related to
CONFIG_HIGHMEM from the core functionality of
__cma_declare_contiguous_nid().

Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: Oscar Salvador
Acked-by: David Hildenbrand
---
 mm/cma.c | 40 +++++++++++++++++++++++++++-------------
 1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index 9bf95f8f0f33..40986722f2e2 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -352,6 +352,30 @@ static void __init list_insert_sorted(
         }
 }
 
+static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
+{
+        if (IS_ENABLED(CONFIG_HIGHMEM)) {
+                phys_addr_t highmem_start = __pa(high_memory - 1) + 1;
+
+                /*
+                 * If allocating at a fixed base the request region must not
+                 * cross the low/high memory boundary.
+                 */
+                if (base < highmem_start && base + size > highmem_start) {
+                        pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
+                                &base, &highmem_start);
+                        return -EINVAL;
+                }
+        }
+
+        if (memblock_is_region_reserved(base, size) ||
+            memblock_reserve(base, size) < 0) {
+                return -EBUSY;
+        }
+
+        return 0;
+}
+
 static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
                         phys_addr_t size, phys_addr_t limit,
                         phys_addr_t alignment, unsigned int order_per_bit,
@@ -407,15 +431,6 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
         if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
                 return -EINVAL;
 
-        /*
-         * If allocating at a fixed base the request region must not cross the
-         * low/high memory boundary.
-         */
-        if (fixed && base < highmem_start && base + size > highmem_start) {
-                pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
-                        &base, &highmem_start);
-                return -EINVAL;
-        }
 
         /*
          * If the limit is unspecified or above the memblock end, its effective
@@ -433,10 +448,9 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
 
         /* Reserve memory */
         if (fixed) {
-                if (memblock_is_region_reserved(base, size) ||
-                    memblock_reserve(base, size) < 0) {
-                        return -EBUSY;
-                }
+                ret = cma_fixed_reserve(base, size);
+                if (ret)
+                        return ret;
         } else {
                 phys_addr_t addr = 0;
 
-- 
2.47.2
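A standalone userspace sketch of the cma_fixed_reserve() contract introduced by this patch: reject a fixed region that straddles the low/high memory boundary (mirroring the CONFIG_HIGHMEM case), then try to claim it, returning 0 or a negative errno. The memblock_* functions, the boundary value, and the state flag below are mocks for illustration, not the kernel API.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

static phys_addr_t highmem_start = 0x38000000;  /* mock low/high boundary */
static bool region_already_reserved;            /* mock memblock state */

static bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
        (void)base; (void)size;
        return region_already_reserved;
}

static int memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        (void)base; (void)size;
        region_already_reserved = true;
        return 0;
}

static int cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
{
        /* A fixed region must not straddle the low/high memory boundary. */
        if (base < highmem_start && base + size > highmem_start)
                return -EINVAL;

        /* Fail if someone already owns the range, otherwise claim it. */
        if (memblock_is_region_reserved(base, size) ||
            memblock_reserve(base, size) < 0)
                return -EBUSY;

        return 0;
}

int main(void)
{
        printf("ok:       %d\n", cma_fixed_reserve(0x10000000, 0x1000000));
        printf("busy:     %d\n", cma_fixed_reserve(0x10000000, 0x1000000));
        printf("straddle: %d\n", cma_fixed_reserve(0x37000000, 0x2000000));
        return 0;
}

The caller only has to propagate the returned error code, which is exactly how the fixed path of __cma_declare_contiguous_nid() reads after this patch.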
From: Mike Rapoport
To: Andrew Morton
Cc: Alexandre Ghiti, David Hildenbrand, Mike Rapoport, Oscar Salvador, Pratyush Yadav, linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH v2 3/3] cma: move memory allocation to a helper function
Date: Thu, 3 Jul 2025 21:47:11 +0300
Message-ID: <20250703184711.3485940-4-rppt@kernel.org>
In-Reply-To: <20250703184711.3485940-1-rppt@kernel.org>
References: <20250703184711.3485940-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

__cma_declare_contiguous_nid() tries to allocate memory in several ways:

 * on systems with a 64-bit physical address and enough memory, it first
   attempts to allocate memory just above 4GiB
 * if that fails, on systems with HIGHMEM the next attempt is from high
   memory
 * and at last, if none of the previous attempts succeeded, or were not
   even tried because of an incompatible configuration, the memory is
   allocated anywhere within the specified limits.

Move all the allocation logic to a helper function to make these steps
more obvious.

Signed-off-by: Mike Rapoport (Microsoft)
Acked-by: David Hildenbrand
Acked-by: Oscar Salvador
---
 mm/cma.c | 104 +++++++++++++++++++++++++++++--------------------------
 1 file changed, 54 insertions(+), 50 deletions(-)

diff --git a/mm/cma.c b/mm/cma.c
index 40986722f2e2..38876ccc07cf 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -376,6 +376,55 @@ static int __init cma_fixed_reserve(phys_addr_t base, phys_addr_t size)
         return 0;
 }
 
+static phys_addr_t __init cma_alloc_mem(phys_addr_t base, phys_addr_t size,
+                        phys_addr_t align, phys_addr_t limit, int nid)
+{
+        phys_addr_t addr = 0;
+
+        /*
+         * If there is enough memory, try a bottom-up allocation first.
+         * It will place the new cma area close to the start of the node
+         * and guarantee that the compaction is moving pages out of the
+         * cma area and not into it.
+         * Avoid using first 4GB to not interfere with constrained zones
+         * like DMA/DMA32.
+         */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+        if (!memblock_bottom_up() && limit >= SZ_4G + size) {
+                memblock_set_bottom_up(true);
+                addr = memblock_alloc_range_nid(size, align, SZ_4G, limit,
+                                nid, true);
+                memblock_set_bottom_up(false);
+        }
+#endif
+
+        /*
+         * On systems with HIGHMEM try allocating from there before consuming
+         * memory in lower zones.
+         */
+        if (!addr && IS_ENABLED(CONFIG_HIGHMEM)) {
+                phys_addr_t highmem = __pa(high_memory - 1) + 1;
+
+                /*
+                 * All pages in the reserved area must come from the same zone.
+                 * If the requested region crosses the low/high memory boundary,
+                 * try allocating from high memory first and fall back to low
+                 * memory in case of failure.
+                 */
+                if (base < highmem && limit > highmem) {
+                        addr = memblock_alloc_range_nid(size, align, highmem,
+                                        limit, nid, true);
+                        limit = highmem;
+                }
+        }
+
+        if (!addr)
+                addr = memblock_alloc_range_nid(size, align, base, limit, nid,
+                                true);
+
+        return addr;
+}
+
 static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
                         phys_addr_t size, phys_addr_t limit,
                         phys_addr_t alignment, unsigned int order_per_bit,
@@ -383,19 +432,9 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
                         int nid)
 {
         phys_addr_t memblock_end = memblock_end_of_DRAM();
-        phys_addr_t highmem_start, base = *basep;
+        phys_addr_t base = *basep;
         int ret;
 
-        /*
-         * We can't use __pa(high_memory) directly, since high_memory
-         * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
-         * complain. Find the boundary by adding one to the last valid
-         * address.
-         */
-        if (IS_ENABLED(CONFIG_HIGHMEM))
-                highmem_start = __pa(high_memory - 1) + 1;
-        else
-                highmem_start = memblock_end_of_DRAM();
         pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
                 __func__, &size, &base, &limit, &alignment);
 
@@ -452,50 +491,15 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep,
         if (ret)
                 return ret;
         } else {
-                phys_addr_t addr = 0;
-
-                /*
-                 * If there is enough memory, try a bottom-up allocation first.
-                 * It will place the new cma area close to the start of the node
-                 * and guarantee that the compaction is moving pages out of the
-                 * cma area and not into it.
-                 * Avoid using first 4GB to not interfere with constrained zones
-                 * like DMA/DMA32.
-                 */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-                if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
-                        memblock_set_bottom_up(true);
-                        addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
-                                        limit, nid, true);
-                        memblock_set_bottom_up(false);
-                }
-#endif
-
-                /*
-                 * All pages in the reserved area must come from the same zone.
-                 * If the requested region crosses the low/high memory boundary,
-                 * try allocating from high memory first and fall back to low
-                 * memory in case of failure.
-                 */
-                if (!addr && base < highmem_start && limit > highmem_start) {
-                        addr = memblock_alloc_range_nid(size, alignment,
-                                        highmem_start, limit, nid, true);
-                        limit = highmem_start;
-                }
-
-                if (!addr) {
-                        addr = memblock_alloc_range_nid(size, alignment, base,
-                                        limit, nid, true);
-                        if (!addr)
-                                return -ENOMEM;
-                }
+                base = cma_alloc_mem(base, size, alignment, limit, nid);
+                if (!base)
+                        return -ENOMEM;
 
                 /*
                  * kmemleak scans/reads tracked objects for pointers to other
                  * objects but this address isn't mapped and accessible
                  */
-                kmemleak_ignore_phys(addr);
-                base = addr;
+                kmemleak_ignore_phys(base);
         }
 
         ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
-- 
2.47.2
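The three steps listed in the changelog map directly onto the new cma_alloc_mem(). A userspace sketch of the same fallback order, with try_alloc() standing in for memblock_alloc_range_nid(); the allocator, addresses, and sizes below are mocked and illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
#define SZ_4G   0x100000000ULL

/* Mock allocator: only windows that fit below "available_top" succeed. */
static phys_addr_t available_top = 0x80000000;  /* pretend 2 GiB of RAM */

static phys_addr_t try_alloc(phys_addr_t size, phys_addr_t from, phys_addr_t to)
{
        if (from + size <= to && from + size <= available_top)
                return from;    /* "allocated" at the start of the window */
        return 0;
}

static phys_addr_t cma_alloc_mem_sketch(phys_addr_t base, phys_addr_t size,
                        phys_addr_t limit, phys_addr_t highmem)
{
        phys_addr_t addr = 0;

        /* 1. Prefer memory above 4 GiB so DMA/DMA32 zones stay untouched. */
        if (limit >= SZ_4G + size)
                addr = try_alloc(size, SZ_4G, limit);

        /* 2. Next, try the high-memory part of the requested window. */
        if (!addr && base < highmem && limit > highmem) {
                addr = try_alloc(size, highmem, limit);
                limit = highmem;
        }

        /* 3. Finally, fall back to anywhere within [base, limit). */
        if (!addr)
                addr = try_alloc(size, base, limit);

        return addr;
}

int main(void)
{
        phys_addr_t a = cma_alloc_mem_sketch(0, 0x4000000, 0x80000000,
                                             0x38000000);
        printf("allocated at %#llx\n", (unsigned long long)a);
        return 0;
}

With only 2 GiB of mock memory the first step cannot succeed, so the sketch falls through to the high-memory window, just as the kernel helper would on a small HIGHMEM system.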