From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 1/7] hugetlb: code clean for hugetlb_hstate_alloc_pages
Date: Tue, 2 Jan 2024 21:12:43 +0800
Message-Id: <20240102131249.76622-2-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

The readability of hugetlb_hstate_alloc_pages() is poor. Cleaning up the
code improves its readability and makes future modifications easier.

This patch extracts two functions to reduce the complexity of
hugetlb_hstate_alloc_pages(); it has no functional changes.

- hugetlb_hstate_alloc_pages_node_specific() handles the iteration over
  each online node and performs the allocation if necessary.
- hugetlb_hstate_alloc_pages_report() reports errors during allocation
  and updates h->max_huge_pages accordingly.
Signed-off-by: Gang Li
Reviewed-by: Muchun Song
Reviewed-by: Tim Chen
---
 mm/hugetlb.c | 46 +++++++++++++++++++++++++++++-----------------
 1 file changed, 29 insertions(+), 17 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed1581b670d42..2606135ec55e6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3482,6 +3482,33 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 	h->max_huge_pages_node[nid] = i;
 }
 
+static bool __init hugetlb_hstate_alloc_pages_node_specific(struct hstate *h)
+{
+	int i;
+	bool node_specific_alloc = false;
+
+	for_each_online_node(i) {
+		if (h->max_huge_pages_node[i] > 0) {
+			hugetlb_hstate_alloc_pages_onenode(h, i);
+			node_specific_alloc = true;
+		}
+	}
+
+	return node_specific_alloc;
+}
+
+static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
+{
+	if (allocated < h->max_huge_pages) {
+		char buf[32];
+
+		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
+		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
+			h->max_huge_pages, buf, allocated);
+		h->max_huge_pages = allocated;
+	}
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.
@@ -3499,7 +3526,6 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	struct folio *folio;
 	LIST_HEAD(folio_list);
 	nodemask_t *node_alloc_noretry;
-	bool node_specific_alloc = false;
 
 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3508,14 +3534,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	}
 
 	/* do node specific alloc */
-	for_each_online_node(i) {
-		if (h->max_huge_pages_node[i] > 0) {
-			hugetlb_hstate_alloc_pages_onenode(h, i);
-			node_specific_alloc = true;
-		}
-	}
-
-	if (node_specific_alloc)
+	if (hugetlb_hstate_alloc_pages_node_specific(h))
 		return;
 
 	/* below will do all node balanced alloc */
@@ -3558,14 +3577,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	/* list will be empty if hstate_is_gigantic */
 	prep_and_add_allocated_folios(h, &folio_list);
 
-	if (i < h->max_huge_pages) {
-		char buf[32];
-
-		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
-		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
-			h->max_huge_pages, buf, i);
-		h->max_huge_pages = i;
-	}
+	hugetlb_hstate_alloc_pages_report(i, h);
 	kfree(node_alloc_noretry);
 }
 
-- 
2.20.1
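
The refactored control flow is easier to see outside of diff context. Below
is a minimal userspace analogue of the two new helpers and of how
hugetlb_hstate_alloc_pages() now calls them; it is an illustration only,
with the actual allocation stubbed out and the node count and page targets
chosen arbitrarily.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_NODES 2

    static unsigned long max_huge_pages = 8;
    static unsigned long max_huge_pages_node[NR_NODES] = { 0, 4 };

    /* Analogue of hugetlb_hstate_alloc_pages_node_specific(). */
    static bool alloc_pages_node_specific(void)
    {
    	bool node_specific_alloc = false;

    	for (int nid = 0; nid < NR_NODES; nid++) {
    		if (max_huge_pages_node[nid] > 0) {
    			printf("node %d: allocating %lu pages\n",
    			       nid, max_huge_pages_node[nid]);
    			node_specific_alloc = true;
    		}
    	}
    	return node_specific_alloc;
    }

    /* Analogue of hugetlb_hstate_alloc_pages_report(): warn and clamp. */
    static void alloc_pages_report(unsigned long allocated)
    {
    	if (allocated < max_huge_pages) {
    		printf("only allocated %lu of %lu pages\n",
    		       allocated, max_huge_pages);
    		max_huge_pages = allocated;
    	}
    }

    int main(void)
    {
    	/* Per-node requests take precedence over the balanced path. */
    	if (alloc_pages_node_specific())
    		return 0;

    	/* A node-balanced allocation would run here (stubbed to zero). */
    	unsigned long allocated = 0;

    	alloc_pages_report(allocated);
    	return 0;
    }

Per-node requests short-circuit the balanced allocation, and any shortfall
is reported and clamped, mirroring the kernel code above.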
From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 2/7] hugetlb: split hugetlb_hstate_alloc_pages
Date: Tue, 2 Jan 2024 21:12:44 +0800
Message-Id: <20240102131249.76622-3-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

1G and 2M huge pages have different allocation and initialization logic,
which leads to subtle differences in parallelization. Therefore, it is
appropriate to split hugetlb_hstate_alloc_pages() into a gigantic and a
non-gigantic path. This patch has no functional changes.

Signed-off-by: Gang Li
---
 mm/hugetlb.c | 86 +++++++++++++++++++++++++++++------------------------
 1 file changed, 45 insertions(+), 41 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2606135ec55e6..92448e747991d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3509,6 +3509,47 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
 	}
 }
 
+static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
+{
+	unsigned long i;
+
+	for (i = 0; i < h->max_huge_pages; ++i) {
+		/*
+		 * gigantic pages not added to list as they are not
+		 * added to pools now.
+		 */
+		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
+			break;
+		cond_resched();
+	}
+
+	return i;
+}
+
+static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
+{
+	unsigned long i;
+	struct folio *folio;
+	LIST_HEAD(folio_list);
+	nodemask_t node_alloc_noretry;
+
+	/* Bit mask controlling how hard we retry per-node allocations.*/
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < h->max_huge_pages; ++i) {
+		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+					      &node_alloc_noretry);
+		if (!folio)
+			break;
+		list_add(&folio->lru, &folio_list);
+		cond_resched();
+	}
+
+	prep_and_add_allocated_folios(h, &folio_list);
+
+	return i;
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.
@@ -3522,10 +3563,7 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
  */
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
-	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t *node_alloc_noretry;
+	unsigned long allocated;
 
 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3539,46 +3577,12 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 
 	/* below will do all node balanced alloc */
 	if (!hstate_is_gigantic(h)) {
-		/*
-		 * Bit mask controlling how hard we retry per-node allocations.
-		 * Ignore errors as lower level routines can deal with
-		 * node_alloc_noretry == NULL. If this kmalloc fails at boot
-		 * time, we are likely in bigger trouble.
-		 */
-		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
-						GFP_KERNEL);
+		allocated = hugetlb_hstate_alloc_pages_non_gigantic(h);
 	} else {
-		/* allocations done at boot time */
-		node_alloc_noretry = NULL;
-	}
-
-	/* bit mask controlling how hard we retry per-node allocations */
-	if (node_alloc_noretry)
-		nodes_clear(*node_alloc_noretry);
-
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		if (hstate_is_gigantic(h)) {
-			/*
-			 * gigantic pages not added to list as they are not
-			 * added to pools now.
-			 */
-			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
-				break;
-		} else {
-			folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-							node_alloc_noretry);
-			if (!folio)
-				break;
-			list_add(&folio->lru, &folio_list);
-		}
-		cond_resched();
+		allocated = hugetlb_hstate_alloc_pages_gigantic(h);
 	}
 
-	/* list will be empty if hstate_is_gigantic */
-	prep_and_add_allocated_folios(h, &folio_list);
-
-	hugetlb_hstate_alloc_pages_report(i, h);
-	kfree(node_alloc_noretry);
+	hugetlb_hstate_alloc_pages_report(allocated, h);
 }
 
 static void __init hugetlb_init_hstates(void)
-- 
2.20.1

From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 3/7] padata: dispatch works on different nodes
Date: Tue, 2 Jan 2024 21:12:45 +0800
Message-Id: <20240102131249.76622-4-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

When a group of tasks that access different nodes is scheduled on the
same node, they may encounter bandwidth bottlenecks and access latency.

Thus, a numa_aware flag is introduced here, allowing tasks to be
distributed across different nodes to fully utilize the advantages of
multi-node systems.
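
As a sketch of the intended usage (modeled on the hugetlb patches later in
this series; the thread function, argument, and sizing below are
placeholders rather than a real padata user), a caller opts in by setting
the new field when filling its padata_mt_job:

    	/* Sketch: request NUMA-aware dispatch for a multithreaded job. */
    	struct padata_mt_job job = {
    		.thread_fn   = my_thread_fn,	/* placeholder worker callback */
    		.fn_arg      = my_arg,		/* placeholder argument */
    		.start       = 0,
    		.size        = nr_items,	/* placeholder job size */
    		.align       = 1,
    		.min_chunk   = nr_items / num_node_state(N_MEMORY),
    		.max_threads = num_node_state(N_MEMORY),
    		.numa_aware  = true,		/* new flag added by this patch */
    	};

    	padata_do_multithreaded(&job);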
Signed-off-by: Gang Li
---
 include/linux/padata.h | 3 +++
 kernel/padata.c        | 8 ++++++--
 mm/mm_init.c           | 1 +
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/include/linux/padata.h b/include/linux/padata.h
index 495b16b6b4d72..f79ccd50e7f40 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -137,6 +137,8 @@ struct padata_shell {
  *             appropriate for one worker thread to do at once.
  * @max_threads: Max threads to use for the job, actual number may be less
  *               depending on task size and minimum chunk size.
+ * @numa_aware: Dispatch jobs to different nodes. If a node only has memory but
+ *              no CPU, dispatch its jobs to a random CPU.
  */
 struct padata_mt_job {
 	void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
@@ -146,6 +148,7 @@ struct padata_mt_job {
 	unsigned long align;
 	unsigned long min_chunk;
 	int max_threads;
+	bool numa_aware;
 };
 
 /**
diff --git a/kernel/padata.c b/kernel/padata.c
index 179fb1518070c..1c2b3a337479e 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -485,7 +485,7 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
 	struct padata_work my_work, *pw;
 	struct padata_mt_job_state ps;
 	LIST_HEAD(works);
-	int nworks;
+	int nworks, nid = 0;
 
 	if (job->size == 0)
 		return;
@@ -517,7 +517,11 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
 	ps.chunk_size = roundup(ps.chunk_size, job->align);
 
 	list_for_each_entry(pw, &works, pw_list)
-		queue_work(system_unbound_wq, &pw->pw_work);
+		if (job->numa_aware)
+			queue_work_node((++nid % num_node_state(N_MEMORY)),
+					system_unbound_wq, &pw->pw_work);
+		else
+			queue_work(system_unbound_wq, &pw->pw_work);
 
 	/* Use the current thread, which saves starting a workqueue worker. */
 	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 89dc29f1e6c6f..59fcffddf65a3 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -2225,6 +2225,7 @@ static int __init deferred_init_memmap(void *data)
 			.align       = PAGES_PER_SECTION,
 			.min_chunk   = PAGES_PER_SECTION,
 			.max_threads = max_threads,
+			.numa_aware  = false,
 		};
 
 		padata_do_multithreaded(&job);
-- 
2.20.1
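
The dispatch policy added above simply advances a counter and wraps it at
the number of nodes that have memory. A self-contained userspace sketch of
that selection, showing which node index each queued work would be sent to
(the node count is an assumed example value, not taken from the patch):

    #include <stdio.h>

    /* Stand-in for num_node_state(N_MEMORY); example value for illustration. */
    #define NR_MEMORY_NODES 4

    int main(void)
    {
    	int nid = 0;

    	/* Mirror the patched loop: each queued work goes to (++nid % node count). */
    	for (int work = 0; work < 8; work++)
    		printf("work %d -> node %d\n", work, ++nid % NR_MEMORY_NODES);

    	return 0;
    }

Note that this spreads works round-robin over node indices; it does not
consult where the data processed by each work actually resides.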
From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 4/7] hugetlb: pass *next_nid_to_alloc directly to for_each_node_mask_to_alloc
Date: Tue, 2 Jan 2024 21:12:46 +0800
Message-Id: <20240102131249.76622-5-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

The parallelization of hugetlb allocation leads to errors when sharing
h->next_nid_to_alloc across different threads. To address this, it is
necessary to assign a separate next_nid_to_alloc to each thread.
Consequently, hstate_next_node_to_alloc() and
for_each_node_mask_to_alloc() have been modified to accept a
*next_nid_to_alloc parameter directly, ensuring thread-specific
allocation and avoiding concurrent access issues.

Signed-off-by: Gang Li
---
This patch does not seem elegant, but I can't come up with anything
better. Any suggestions will be highly appreciated!
---
 mm/hugetlb.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 92448e747991d..a71bc1622b53b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1464,15 +1464,15 @@ static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
  * next node from which to allocate, handling wrap at end of node
  * mask.
  */
-static int hstate_next_node_to_alloc(struct hstate *h,
+static int hstate_next_node_to_alloc(int *next_nid_to_alloc,
 					nodemask_t *nodes_allowed)
 {
 	int nid;
 
 	VM_BUG_ON(!nodes_allowed);
 
-	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
-	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
+	nid = get_valid_node_allowed(*next_nid_to_alloc, nodes_allowed);
+	*next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
 
 	return nid;
 }
@@ -1495,10 +1495,10 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
 	return nid;
 }
 
-#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
+#define for_each_node_mask_to_alloc(next_nid_to_alloc, nr_nodes, node, mask)	\
 	for (nr_nodes = nodes_weight(*mask);				\
 		nr_nodes > 0 &&						\
-		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
+		((node = hstate_next_node_to_alloc(next_nid_to_alloc, mask)) || 1);	\
 		nr_nodes--)
 
 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
@@ -2350,12 +2350,13 @@ static void prep_and_add_allocated_folios(struct hstate *h,
  */
 static struct folio *alloc_pool_huge_folio(struct hstate *h,
 					nodemask_t *nodes_allowed,
-					nodemask_t *node_alloc_noretry)
+					nodemask_t *node_alloc_noretry,
+					int *next_nid_to_alloc)
 {
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 	int nr_nodes, node;
 
-	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+	for_each_node_mask_to_alloc(next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
 		struct folio *folio;
 
 		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
@@ -3310,7 +3311,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 		goto found;
 	}
 	/* allocate from next node when distributing huge pages */
-	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
+	for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
 		m = memblock_alloc_try_nid_raw(
 				huge_page_size(h), huge_page_size(h),
 				0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
@@ -3684,7 +3685,7 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 	VM_BUG_ON(delta != -1 && delta != 1);
 
 	if (delta < 0) {
-		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
+		for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
 			if (h->surplus_huge_pages_node[node])
 				goto found;
 		}
@@ -3799,7 +3800,8 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 		cond_resched();
 
 		folio = alloc_pool_huge_folio(h, nodes_allowed,
-						node_alloc_noretry);
+						node_alloc_noretry,
+						&h->next_nid_to_alloc);
 		if (!folio) {
 			prep_and_add_allocated_folios(h, &page_list);
 			spin_lock_irq(&hugetlb_lock);
-- 
2.20.1
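
With the cursor passed in explicitly, every allocating thread can keep its
own round-robin position on its stack. The 2M parallelization patch later
in this series uses the new parameter roughly as follows (fragment shown
for illustration only):

    	/*
    	 * Each worker thread keeps a private cursor instead of sharing
    	 * h->next_nid_to_alloc with other threads.
    	 */
    	int next_nid_to_alloc = 0;
    	nodemask_t node_alloc_noretry;
    	struct folio *folio;

    	nodes_clear(node_alloc_noretry);

    	folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
    				      &node_alloc_noretry, &next_nid_to_alloc);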
From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 5/7] hugetlb: have CONFIG_HUGETLBFS select CONFIG_PADATA
Date: Tue, 2 Jan 2024 21:12:47 +0800
Message-Id: <20240102131249.76622-6-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

Now that hugetlb uses padata_do_multithreaded() for parallel
initialization, select CONFIG_PADATA.

Signed-off-by: Gang Li
Reviewed-by: Muchun Song
---
 fs/Kconfig | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/Kconfig b/fs/Kconfig
index 89fdbefd1075f..a57d6e6c41e6f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -262,6 +262,7 @@ menuconfig HUGETLBFS
 	depends on X86 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
 	depends on (SYSFS || SYSCTL)
 	select MEMFD_CREATE
+	select PADATA
 	help
 	  hugetlbfs is a filesystem backing for HugeTLB pages, based on
 	  ramfs. For architectures that support it, say Y here and read
-- 
2.20.1
From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 6/7] hugetlb: parallelize 2M hugetlb allocation and initialization
Date: Tue, 2 Jan 2024 21:12:48 +0800
Message-Id: <20240102131249.76622-7-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

By distributing both the allocation and the initialization tasks across
multiple threads, the initialization of 2M hugetlb pages becomes faster,
thereby improving boot speed.

Here are some test results:

 test                 no patch(ms)   patched(ms)   saved
 ------------------- -------------- ------------- --------
 256c2t(4 node) 2M       3336           1051       68.52%
 128c1t(2 node) 2M       1943            716       63.15%

Signed-off-by: Gang Li
---
 mm/hugetlb.c | 72 ++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 53 insertions(+), 19 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a71bc1622b53b..d1629df5f399f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -3510,6 +3511,38 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, struct hstate *h)
 	}
 }
 
+static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
+{
+	struct hstate *h = (struct hstate *)arg;
+	int i, num = end - start;
+	nodemask_t node_alloc_noretry;
+	unsigned long flags;
+	int next_nid_to_alloc = 0;
+
+	/* Bit mask controlling how hard we retry per-node allocations.*/
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < num; ++i) {
+		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+						&node_alloc_noretry, &next_nid_to_alloc);
+		if (!folio)
+			break;
+		spin_lock_irqsave(&hugetlb_lock, flags);
+		__prep_account_new_huge_page(h, folio_nid(folio));
+		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
+		cond_resched();
+	}
+}
+
+static void __init hugetlb_vmemmap_optimize_node(unsigned long start, unsigned long end, void *arg)
+{
+	struct hstate *h = (struct hstate *)arg;
+	int nid = start;
+
+	hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
+}
+
 static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
 {
 	unsigned long i;
@@ -3529,26 +3562,27 @@ static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
 {
-	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t node_alloc_noretry;
-
-	/* Bit mask controlling how hard we retry per-node allocations.*/
-	nodes_clear(node_alloc_noretry);
-
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-					      &node_alloc_noretry);
-		if (!folio)
-			break;
-		list_add(&folio->lru, &folio_list);
-		cond_resched();
-	}
-
-	prep_and_add_allocated_folios(h, &folio_list);
+	struct padata_mt_job job = {
+		.fn_arg		= h,
+		.align		= 1,
+		.numa_aware	= true
+	};
 
-	return i;
+	job.thread_fn	= hugetlb_alloc_node;
+	job.start	= 0;
+	job.size	= h->max_huge_pages;
+	job.min_chunk	= h->max_huge_pages / num_node_state(N_MEMORY) / 2;
+	job.max_threads	= num_node_state(N_MEMORY) * 2;
+	padata_do_multithreaded(&job);
+
+	job.thread_fn	= hugetlb_vmemmap_optimize_node;
+	job.start	= 0;
+	job.size	= num_node_state(N_MEMORY);
+	job.min_chunk	= 1;
+	job.max_threads	= num_node_state(N_MEMORY);
+	padata_do_multithreaded(&job);
+
+	return h->nr_huge_pages;
 }
 
 /*
-- 
2.20.1

From: Gang Li
To: David Hildenbrand, David Rientjes, Mike Kravetz, Muchun Song, Andrew Morton, Tim Chen
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org, ligang.bdlg@bytedance.com, Gang Li
Subject: [PATCH v3 7/7] hugetlb: parallelize 1G hugetlb initialization
Date: Tue, 2 Jan 2024 21:12:49 +0800
Message-Id: <20240102131249.76622-8-gang.li@linux.dev>
In-Reply-To: <20240102131249.76622-1-gang.li@linux.dev>
References: <20240102131249.76622-1-gang.li@linux.dev>

Optimize the initialization speed of 1G huge pages through
parallelization.

1G hugetlb pages are allocated from bootmem, a process that is already
very fast and does not currently require optimization. Therefore, we
focus on parallelizing only the initialization phase in
gather_bootmem_prealloc().
Here are some test results:

 test                 no patch(ms)   patched(ms)   saved
 ------------------- -------------- ------------- --------
 256c2t(4 node) 1G       4745           2024       57.34%
 128c1t(2 node) 1G       3358           1712       49.02%
 12t            1G      77000          18300       76.23%

Signed-off-by: Gang Li
---
 include/linux/hugetlb.h |  2 +-
 mm/hugetlb.c            | 40 +++++++++++++++++++++++++++++-------
 2 files changed, 34 insertions(+), 8 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c1ee640d87b11..77b30a8c6076b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -178,7 +178,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
 extern int sysctl_hugetlb_shm_group;
-extern struct list_head huge_boot_pages;
+extern struct list_head huge_boot_pages[MAX_NUMNODES];
 
 /* arch callbacks */
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d1629df5f399f..e5a55707f8814 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -69,7 +69,7 @@ static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
 #endif
 static unsigned long hugetlb_cma_size __initdata;
 
-__initdata LIST_HEAD(huge_boot_pages);
+__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
 
 /* for command line parsing */
 static struct hstate * __initdata parsed_hstate;
@@ -3339,7 +3339,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
 			huge_page_size(h) - PAGE_SIZE);
 	/* Put them into a private list first because mem_map is not up yet */
 	INIT_LIST_HEAD(&m->list);
-	list_add(&m->list, &huge_boot_pages);
+	list_add(&m->list, &huge_boot_pages[node]);
 	m->hstate = h;
 	return 1;
 }
@@ -3390,8 +3390,6 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
 	/* Send list for bulk vmemmap optimization processing */
 	hugetlb_vmemmap_optimize_folios(h, folio_list);
 
-	/* Add all new pool pages to free lists in one lock cycle */
-	spin_lock_irqsave(&hugetlb_lock, flags);
 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
 		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
 			/*
@@ -3404,23 +3402,27 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
 					HUGETLB_VMEMMAP_RESERVE_PAGES,
 					pages_per_huge_page(h));
 		}
+		/* Subdivide locks to achieve better parallel performance */
+		spin_lock_irqsave(&hugetlb_lock, flags);
 		__prep_account_new_huge_page(h, folio_nid(folio));
 		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}
-	spin_unlock_irqrestore(&hugetlb_lock, flags);
 }
 
 /*
  * Put bootmem huge pages into the standard lists after mem_map is up.
  * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
  */
-static void __init gather_bootmem_prealloc(void)
+static void __init __gather_bootmem_prealloc(unsigned long start, unsigned long end, void *arg)
+
 {
+	int nid = start;
 	LIST_HEAD(folio_list);
 	struct huge_bootmem_page *m;
 	struct hstate *h = NULL, *prev_h = NULL;
 
-	list_for_each_entry(m, &huge_boot_pages, list) {
+	list_for_each_entry(m, &huge_boot_pages[nid], list) {
 		struct page *page = virt_to_page(m);
 		struct folio *folio = (void *)page;
 
@@ -3453,6 +3455,22 @@ static void __init gather_bootmem_prealloc(void)
 	prep_and_add_bootmem_folios(h, &folio_list);
 }
 
+static void __init gather_bootmem_prealloc(void)
+{
+	struct padata_mt_job job = {
+		.thread_fn	= __gather_bootmem_prealloc,
+		.fn_arg		= NULL,
+		.start		= 0,
+		.size		= num_node_state(N_MEMORY),
+		.align		= 1,
+		.min_chunk	= 1,
+		.max_threads	= num_node_state(N_MEMORY),
+		.numa_aware	= true,
+	};
+
+	padata_do_multithreaded(&job);
+}
+
 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
 {
 	unsigned long i;
@@ -3606,6 +3624,14 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		return;
 	}
 
+	/* hugetlb_hstate_alloc_pages will be called many times, init huge_boot_pages once */
+	if (huge_boot_pages[0].next == NULL) {
+		int i = 0;
+
+		for (i = 0; i < MAX_NUMNODES; i++)
+			INIT_LIST_HEAD(&huge_boot_pages[i]);
+	}
+
 	/* do node specific alloc */
 	if (hugetlb_hstate_alloc_pages_node_specific(h))
 		return;
-- 
2.20.1
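
For reference, the boot-time pools whose setup this series parallelizes are
the ones requested on the kernel command line, for example:

    hugepagesz=1G hugepages=80 hugepagesz=2M hugepages=51200

(the page counts here are illustrative, not the configurations used for the
tables above). The observed savings depend on the number of nodes and the
amount of memory, as the tables in patches 6 and 7 show.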