From: Mike Rapoport
To: linux-mm@kvack.org
Cc: Andrew Morton, Bill Wendling, Daniel Jordan, Justin Stitt,
	Michael Ellerman, Miguel Ojeda, Mike Rapoport, Nathan Chancellor,
	Nick Desaulniers, linux-kernel@vger.kernel.org, llvm@lists.linux.dev
Subject: [PATCH 3/4] mm/mm_init: drop deferred_init_maxorder()
Date: Mon, 18 Aug 2025 09:46:14 +0300
Message-ID: <20250818064615.505641-4-rppt@kernel.org>
In-Reply-To: <20250818064615.505641-1-rppt@kernel.org>
References: <20250818064615.505641-1-rppt@kernel.org>

From: "Mike Rapoport (Microsoft)"

deferred_init_memmap_chunk() calls deferred_init_maxorder() to initialize
struct pages in chunks of MAX_ORDER_NR_PAGES pages because, according to
commit 0e56acae4b4d ("mm: initialize MAX_ORDER_NR_PAGES at a time instead of
doing larger sections"), this provides better cache locality than
initializing the memory map in larger sections.

The looping through free memory ranges is quite cumbersome in the current
implementation, as it is split between deferred_init_memmap_chunk() and
deferred_init_maxorder(). In addition, the latter has two loops: one that
initializes struct pages and another that frees them.
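As an aside (illustration only, not part of the change itself): the chunking
both the old and the new code rely on is simply splitting a PFN range along
MAX_ORDER_NR_PAGES boundaries. A standalone user-space sketch of that split,
with a made-up MAX_ORDER_NR_PAGES value and printf() standing in for
deferred_init_pages()/deferred_free_pages():

#include <stdio.h>

/* Illustrative value only; the real MAX_ORDER_NR_PAGES depends on config */
#define MAX_ORDER_NR_PAGES	1024UL
/* Mirrors the kernel's ALIGN() for power-of-two alignments */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Hypothetical stand-in for deferred_init_pages() + deferred_free_pages() */
static void init_and_free_range(unsigned long spfn, unsigned long epfn)
{
	while (spfn < epfn) {
		/* first max-order boundary strictly above spfn, capped at epfn */
		unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
		unsigned long chunk_end = mo_pfn < epfn ? mo_pfn : epfn;

		printf("init + free pfns [%lu, %lu)\n", spfn, chunk_end);
		spfn = chunk_end;
	}
}

int main(void)
{
	/* a free range that is not aligned to max-order boundaries */
	init_and_free_range(1000, 3500);
	return 0;
}

With these illustrative numbers it prints the chunks [1000, 1024),
[1024, 2048), [2048, 3072) and [3072, 3500), i.e. no more than
MAX_ORDER_NR_PAGES pages are initialized and freed before moving on to the
next chunk.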
There is no need for two loops because it is safe to free pages in groups
smaller than MAX_ORDER_NR_PAGES. Even if the lookup of a buddy page accesses
a struct page ahead of the pages being initialized, that page is guaranteed
to have been initialized either by memmap_init_reserved_pages() or by
init_unavailable_range().

Simplify the code by moving initialization and freeing of the pages into
deferred_init_memmap_chunk() and dropping deferred_init_maxorder().

Signed-off-by: Mike Rapoport (Microsoft)
Reviewed-by: David Hildenbrand
---
 mm/mm_init.c | 122 ++++++++++++---------------------------------------
 1 file changed, 29 insertions(+), 93 deletions(-)

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 1ecfba98ddbe..bca05891cb16 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2046,111 +2046,47 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
 }
 
 /*
- * This function is meant to pre-load the iterator for the zone init from
- * a given point.
- * Specifically it walks through the ranges starting with initial index
- * passed to it until we are caught up to the first_init_pfn value and
- * exits there. If we never encounter the value we return false indicating
- * there are no valid ranges left.
- */
-static bool __init
-deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
-				    unsigned long *spfn, unsigned long *epfn,
-				    unsigned long first_init_pfn)
-{
-	u64 j = *i;
-
-	if (j == 0)
-		__next_mem_pfn_range_in_zone(&j, zone, spfn, epfn);
-
-	/*
-	 * Start out by walking through the ranges in this zone that have
-	 * already been initialized. We don't need to do anything with them
-	 * so we just need to flush them out of the system.
-	 */
-	for_each_free_mem_pfn_range_in_zone_from(j, zone, spfn, epfn) {
-		if (*epfn <= first_init_pfn)
-			continue;
-		if (*spfn < first_init_pfn)
-			*spfn = first_init_pfn;
-		*i = j;
-		return true;
-	}
-
-	return false;
-}
-
-/*
- * Initialize and free pages. We do it in two loops: first we initialize
- * struct page, then free to buddy allocator, because while we are
- * freeing pages we can access pages that are ahead (computing buddy
- * page in __free_one_page()).
+ * Initialize and free pages.
+ *
+ * At this point reserved pages and struct pages that correspond to holes in
+ * memblock.memory are already initialized so every free range has a valid
+ * memory map around it.
+ * This ensures that access of pages that are ahead of the range being
+ * initialized (computing buddy page in __free_one_page()) always reads a valid
+ * struct page.
  *
- * In order to try and keep some memory in the cache we have the loop
- * broken along max page order boundaries. This way we will not cause
- * any issues with the buddy page computation.
+ * In order to try and improve CPU cache locality we have the loop broken along
+ * max page order boundaries.
  */
 static unsigned long __init
-deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
-		       unsigned long *end_pfn)
+deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
+			   struct zone *zone)
 {
-	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
-	unsigned long spfn = *start_pfn, epfn = *end_pfn;
+	int nid = zone_to_nid(zone);
 	unsigned long nr_pages = 0;
-	u64 j = *i;
-
-	/* First we loop through and initialize the page values */
-	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
-		unsigned long t;
-
-		if (mo_pfn <= *start_pfn)
-			break;
-
-		t = min(mo_pfn, *end_pfn);
-		nr_pages += deferred_init_pages(zone, *start_pfn, t);
-
-		if (mo_pfn < *end_pfn) {
-			*start_pfn = mo_pfn;
-			break;
-		}
-	}
-
-	/* Reset values and now loop through freeing pages as needed */
-	swap(j, *i);
-
-	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
-		unsigned long t;
-
-		if (mo_pfn <= spfn)
-			break;
+	phys_addr_t start, end;
+	u64 i = 0;
 
-		t = min(mo_pfn, epfn);
-		deferred_free_pages(spfn, t - spfn);
+	for_each_free_mem_range(i, nid, 0, &start, &end, NULL) {
+		unsigned long spfn = PFN_UP(start);
+		unsigned long epfn = PFN_DOWN(end);
 
-		if (mo_pfn <= epfn)
+		if (spfn >= end_pfn)
 			break;
-	}
 
-	return nr_pages;
-}
+		spfn = max(spfn, start_pfn);
+		epfn = min(epfn, end_pfn);
 
-static unsigned long __init
-deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
-			   struct zone *zone)
-{
-	unsigned long nr_pages = 0;
-	unsigned long spfn, epfn;
-	u64 i = 0;
+		while (spfn < epfn) {
+			unsigned long mo_pfn = ALIGN(spfn + 1, MAX_ORDER_NR_PAGES);
+			unsigned long chunk_end = min(mo_pfn, epfn);
 
-	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
+			nr_pages += deferred_init_pages(zone, spfn, chunk_end);
+			deferred_free_pages(spfn, chunk_end - spfn);
 
-	/*
-	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so that
-	 * we can avoid introducing any issues with the buddy allocator.
-	 */
-	while (spfn < end_pfn) {
-		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
-		cond_resched();
+			spfn = chunk_end;
+			cond_resched();
+		}
 	}
 
 	return nr_pages;
-- 
2.50.1