From: Mike Rapoport
To: linux-mm@kvack.org
Cc: Andrew Morton, Bill Wendling, Daniel Jordan, David Hildenbrand,
	Justin Stitt, Michael Ellerman, Miguel Ojeda, Mike Rapoport,
	Nathan Chancellor, Nick Desaulniers, Wei Yang,
	linux-kernel@vger.kernel.org, llvm@lists.linux.dev
Subject: [PATCH v2 1/4] mm/mm_init: use deferred_init_memmap_chunk() in deferred_grow_zone()
Date: Mon, 1 Sep 2025 11:34:20 +0300
Message-ID: <20250901083423.3061349-2-rppt@kernel.org>
X-Mailer: git-send-email 2.50.1
In-Reply-To: <20250901083423.3061349-1-rppt@kernel.org>
References: <20250901083423.3061349-1-rppt@kernel.org>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

From: "Mike Rapoport (Microsoft)"

deferred_grow_zone() initializes one or more sections in the memory map
when the buddy allocator runs out of initialized struct pages and
CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. It loops through memblock
regions and initializes and frees pages in MAX_ORDER_NR_PAGES chunks.
Essentially the same loop is implemented in deferred_init_memmap_chunk();
the only actual difference is that deferred_init_memmap_chunk() does not
count the initialized pages.
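To make the duplication concrete, the pattern both places implement looks
roughly like the sketch below. This is an illustrative outline only, not
code from the patch; init_range_sketch() is a made-up name, and it simply
reuses deferred_init_mem_pfn_range_in_zone() and deferred_init_maxorder()
with the signatures visible in the diff:

	static unsigned long __init
	init_range_sketch(struct zone *zone, unsigned long start_pfn,
			  unsigned long end_pfn)
	{
		unsigned long spfn, epfn, nr_pages = 0;
		u64 i = 0;

		/* find the first initialized PFN range intersecting [start_pfn, end_pfn) */
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);

		/*
		 * Initialize and free pages in MAX_ORDER_NR_PAGES increments
		 * so the buddy allocator only ever sees fully initialized
		 * chunks.
		 */
		while (spfn < end_pfn) {
			nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
			cond_resched();
		}

		return nr_pages;
	}

Before this patch, deferred_init_memmap_chunk() was this loop minus the
nr_pages accounting, while deferred_grow_zone() open-coded the same walk
with counting and section-boundary checks on top.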
Make deferred_init_memmap_chunk() count the initialized pages and return
their number, wrap it with deferred_init_memmap_job() for multithreaded
initialization with padata_do_multithreaded(), and replace the open-coded
initialization of struct pages in deferred_grow_zone() with a call to
deferred_init_memmap_chunk().

Reviewed-by: David Hildenbrand
Reviewed-by: Wei Yang
Link: https://lore.kernel.org/r/20250818064615.505641-2-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft)
---
 mm/mm_init.c | 70 +++++++++++++++++++++++++++-------------------------
 1 file changed, 36 insertions(+), 34 deletions(-)

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 5c21b3af216b..e73f313dc375 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2134,12 +2134,12 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
 	return nr_pages;
 }
 
-static void __init
+static unsigned long __init
 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
-			   void *arg)
+			   struct zone *zone)
 {
+	unsigned long nr_pages = 0;
 	unsigned long spfn, epfn;
-	struct zone *zone = arg;
 	u64 i = 0;
 
 	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
@@ -2149,9 +2149,23 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
 	 * we can avoid introducing any issues with the buddy allocator.
 	 */
 	while (spfn < end_pfn) {
-		deferred_init_maxorder(&i, zone, &spfn, &epfn);
-		cond_resched();
+		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
+		if (irqs_disabled())
+			touch_nmi_watchdog();
+		else
+			cond_resched();
 	}
+
+	return nr_pages;
+}
+
+static void __init
+deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
+			 void *arg)
+{
+	struct zone *zone = arg;
+
+	deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
 }
 
 static unsigned int __init
@@ -2204,7 +2218,7 @@ static int __init deferred_init_memmap(void *data)
 	while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) {
 		first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION);
 		struct padata_mt_job job = {
-			.thread_fn	= deferred_init_memmap_chunk,
+			.thread_fn	= deferred_init_memmap_job,
 			.fn_arg		= zone,
 			.start		= spfn,
 			.size		= first_init_pfn - spfn,
@@ -2240,12 +2254,11 @@ static int __init deferred_init_memmap(void *data)
  */
 bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
 {
-	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
+	unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order);
 	pg_data_t *pgdat = zone->zone_pgdat;
 	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
 	unsigned long spfn, epfn, flags;
 	unsigned long nr_pages = 0;
-	u64 i = 0;
 
 	/* Only the last zone may have deferred pages */
 	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
@@ -2262,37 +2275,26 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
 		return true;
 	}
 
-	/* If the zone is empty somebody else may have cleared out the zone */
-	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
-						 first_deferred_pfn)) {
-		pgdat->first_deferred_pfn = ULONG_MAX;
-		pgdat_resize_unlock(pgdat, &flags);
-		/* Retry only once. */
-		return first_deferred_pfn != ULONG_MAX;
+	/*
+	 * Initialize at least nr_pages_needed in section chunks.
+	 * If a section has less free memory than nr_pages_needed, the next
+	 * section will be also initialized.
+	 * Note, that it still does not guarantee that allocation of order can
+	 * be satisfied if the sections are fragmented because of memblock
+	 * allocations.
+	 */
+	for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
+	     nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
+	     spfn = epfn, epfn += PAGES_PER_SECTION) {
+		nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
 	}
 
 	/*
-	 * Initialize and free pages in MAX_PAGE_ORDER sized increments so
-	 * that we can avoid introducing any issues with the buddy
-	 * allocator.
+	 * There were no pages to initialize and free which means the zone's
+	 * memory map is completely initialized.
 	 */
-	while (spfn < epfn) {
-		/* update our first deferred PFN for this section */
-		first_deferred_pfn = spfn;
-
-		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
-		touch_nmi_watchdog();
-
-		/* We should only stop along section boundaries */
-		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
-			continue;
-
-		/* If our quota has been met we can stop here */
-		if (nr_pages >= nr_pages_needed)
-			break;
-	}
+	pgdat->first_deferred_pfn = nr_pages ? spfn : ULONG_MAX;
 
-	pgdat->first_deferred_pfn = spfn;
 	pgdat_resize_unlock(pgdat, &flags);
 
 	return nr_pages > 0;
-- 
2.50.1
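A note on why the deferred_init_memmap_job() wrapper is needed at all:
padata_do_multithreaded() invokes its per-thread callback through
struct padata_mt_job and expects it to return void, so the counting
helper cannot be passed as .thread_fn directly. The relevant part of the
job descriptor is roughly the following (abbreviated and paraphrased from
include/linux/padata.h; the exact field set may differ between kernel
versions):

	struct padata_mt_job {
		void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
		void *fn_arg;		/* passed through to thread_fn, here the zone */
		unsigned long start;	/* first unit of work, here a PFN */
		unsigned long size;	/* number of units, here a PFN count */
		unsigned long align;	/* preferred chunk alignment */
		unsigned long min_chunk;	/* minimum chunk size per thread */
		int max_threads;	/* upper bound on worker threads */
		/* ... */
	};

deferred_init_memmap_job() therefore simply discards the page count
returned by deferred_init_memmap_chunk(), while deferred_grow_zone()
calls the chunk helper directly and uses the count to decide when enough
sections have been initialized.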