From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org
Cc: mgorman@techsingularity.net, shy828301@gmail.com, david@redhat.com,
    ying.huang@intel.com, baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
    linux-kernel@vger.kernel.org
Subject: [PATCH v2 1/4] mm: migrate: factor out migration validation into numa_page_can_migrate()
Date: Tue, 22 Aug 2023 08:53:49 +0800
Message-Id: <6e1c5a86b8d960294582a1221a1a20eb66e53b37.1692665449.git.baolin.wang@linux.alibaba.com>

There are now several places that validate whether a page can be migrated
or not, so factor these validations out into a new helper,
numa_page_can_migrate(), to make them easier to maintain.
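For illustration only, a condensed view of how the fault paths end up using
the new helper after this patch (drawn from the do_numa_page() and
do_huge_pmd_numa_page() hunks below, not a standalone snippet):

        /* Validate first; on failure drop our reference and account the fault. */
        if (!numa_page_can_migrate(vma, page)) {
                put_page(page);
                goto migrate_fail;      /* counted as TNF_MIGRATE_FAIL */
        }
        migrated = migrate_misplaced_page(page, vma, target_nid);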
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/huge_memory.c |  6 ++++++
 mm/internal.h    |  1 +
 mm/memory.c      | 30 ++++++++++++++++++++++++++++++
 mm/migrate.c     | 19 -------------------
 4 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4465915711c3..4a9b34a89854 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1540,11 +1540,17 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         spin_unlock(vmf->ptl);
         writable = false;
 
+        if (!numa_page_can_migrate(vma, page)) {
+                put_page(page);
+                goto migrate_fail;
+        }
+
         migrated = migrate_misplaced_page(page, vma, target_nid);
         if (migrated) {
                 flags |= TNF_MIGRATED;
                 page_nid = target_nid;
         } else {
+migrate_fail:
                 flags |= TNF_MIGRATE_FAIL;
                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
                 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
diff --git a/mm/internal.h b/mm/internal.h
index f59a53111817..1e00b8a30910 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -933,6 +933,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
                       unsigned long addr, int page_nid, int *flags);
+bool numa_page_can_migrate(struct vm_area_struct *vma, struct page *page);
 
 void free_zone_device_page(struct page *page);
 int migrate_device_coherent_page(struct page *page);
diff --git a/mm/memory.c b/mm/memory.c
index 12647d139a13..fc6f6b7a70e1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4735,6 +4735,30 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
         return mpol_misplaced(page, vma, addr);
 }
 
+bool numa_page_can_migrate(struct vm_area_struct *vma, struct page *page)
+{
+        /*
+         * Don't migrate file pages that are mapped in multiple processes
+         * with execute permissions as they are probably shared libraries.
+         */
+        if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+            (vma->vm_flags & VM_EXEC))
+                return false;
+
+        /*
+         * Also do not migrate dirty pages as not all filesystems can move
+         * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
+         */
+        if (page_is_file_lru(page) && PageDirty(page))
+                return false;
+
+        /* Do not migrate THP mapped by multiple processes */
+        if (PageTransHuge(page) && total_mapcount(page) > 1)
+                return false;
+
+        return true;
+}
+
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
         struct vm_area_struct *vma = vmf->vma;
@@ -4815,11 +4839,17 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
         pte_unmap_unlock(vmf->pte, vmf->ptl);
         writable = false;
 
+        if (!numa_page_can_migrate(vma, page)) {
+                put_page(page);
+                goto migrate_fail;
+        }
+
         /* Migrate to the requested node */
         if (migrate_misplaced_page(page, vma, target_nid)) {
                 page_nid = target_nid;
                 flags |= TNF_MIGRATED;
         } else {
+migrate_fail:
                 flags |= TNF_MIGRATE_FAIL;
                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                                vmf->address, &vmf->ptl);
diff --git a/mm/migrate.c b/mm/migrate.c
index e21d5a7e7447..9cc98fb1d6ec 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2485,10 +2485,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
         VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
 
-        /* Do not migrate THP mapped by multiple processes */
-        if (PageTransHuge(page) && total_mapcount(page) > 1)
-                return 0;
-
         /* Avoid migrating to a node that is nearly full */
         if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
                 int z;
@@ -2533,21 +2529,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
         LIST_HEAD(migratepages);
         int nr_pages = thp_nr_pages(page);
 
-        /*
-         * Don't migrate file pages that are mapped in multiple processes
-         * with execute permissions as they are probably shared libraries.
-         */
-        if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
-            (vma->vm_flags & VM_EXEC))
-                goto out;
-
-        /*
-         * Also do not migrate dirty pages as not all filesystems can move
-         * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
-         */
-        if (page_is_file_lru(page) && PageDirty(page))
-                goto out;
-
         isolated = numamigrate_isolate_page(pgdat, page);
         if (!isolated)
                 goto out;
-- 
2.39.3

From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org
Cc: mgorman@techsingularity.net, shy828301@gmail.com, david@redhat.com,
    ying.huang@intel.com, baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
    linux-kernel@vger.kernel.org
Subject: [PATCH v2 2/4] mm: migrate: move the numamigrate_isolate_page() into do_numa_page()
Date: Tue, 22 Aug 2023 08:53:50 +0800
Message-Id: <9ff2a9e3e644103a08b9b84b76b39bbd4c60020b.1692665449.git.baolin.wang@linux.alibaba.com>

Move numamigrate_isolate_page() into do_numa_page() to simplify
migrate_misplaced_page(), which now focuses only on the page migration
itself; this also serves as a preparation for supporting batch migration
in migrate_misplaced_page(). While we are at it, change
numamigrate_isolate_page() to return a boolean to make its return value
clearer.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/migrate.h |  6 ++++++
 mm/huge_memory.c        |  7 +++++++
 mm/memory.c             |  7 +++++++
 mm/migrate.c            | 22 +++++++---------------
 4 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 711dd9412561..ddcd62ec2c12 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -144,12 +144,18 @@ const struct movable_operations *page_movable_ops(struct page *page)
 #ifdef CONFIG_NUMA_BALANCING
 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                            int node);
+bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page);
 #else
 static inline int migrate_misplaced_page(struct page *page,
                                          struct vm_area_struct *vma, int node)
 {
         return -EAGAIN; /* can't migrate now */
 }
+
+static inline bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+{
+        return false;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_MIGRATION
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4a9b34a89854..07149ead11e4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1496,6 +1496,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
         bool migrated = false, writable = false;
         int flags = 0;
+        pg_data_t *pgdat;
 
         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
         if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1545,6 +1546,12 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
                 goto migrate_fail;
         }
 
+        pgdat = NODE_DATA(target_nid);
+        if (!numamigrate_isolate_page(pgdat, page)) {
+                put_page(page);
+                goto migrate_fail;
+        }
+
         migrated = migrate_misplaced_page(page, vma, target_nid);
         if (migrated) {
                 flags |= TNF_MIGRATED;
diff --git a/mm/memory.c b/mm/memory.c
index fc6f6b7a70e1..4e451b041488 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4769,6 +4769,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
         int target_nid;
         pte_t pte, old_pte;
         int flags = 0;
+        pg_data_t *pgdat;
 
         /*
          * The "pte" at this point cannot be used safely without
@@ -4844,6 +4845,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
                 goto migrate_fail;
         }
 
+        pgdat = NODE_DATA(target_nid);
+        if (!numamigrate_isolate_page(pgdat, page)) {
+                put_page(page);
+                goto migrate_fail;
+        }
+
         /* Migrate to the requested node */
         if (migrate_misplaced_page(page, vma, target_nid)) {
                 page_nid = target_nid;
diff --git a/mm/migrate.c b/mm/migrate.c
index 9cc98fb1d6ec..0b2b69a2a7ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2478,7 +2478,7 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
         return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
         int nr_pages = thp_nr_pages(page);
         int order = compound_order(page);
@@ -2496,11 +2496,11 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                                 break;
                 }
                 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
-                return 0;
+                return false;
         }
 
         if (!isolate_lru_page(page))
-                return 0;
+                return false;
 
         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
                             nr_pages);
@@ -2511,7 +2511,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
          * disappearing underneath us during migration.
          */
         put_page(page);
-        return 1;
+        return true;
 }
 
 /*
@@ -2523,16 +2523,12 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                            int node)
 {
         pg_data_t *pgdat = NODE_DATA(node);
-        int isolated;
+        int migrated = 1;
         int nr_remaining;
         unsigned int nr_succeeded;
         LIST_HEAD(migratepages);
         int nr_pages = thp_nr_pages(page);
 
-        isolated = numamigrate_isolate_page(pgdat, page);
-        if (!isolated)
-                goto out;
-
         list_add(&page->lru, &migratepages);
         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
                                      NULL, node, MIGRATE_ASYNC,
@@ -2544,7 +2540,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                                           page_is_file_lru(page), -nr_pages);
                         putback_lru_page(page);
                 }
-                isolated = 0;
+                migrated = 0;
         }
         if (nr_succeeded) {
                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
@@ -2553,11 +2549,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                                     nr_succeeded);
         }
         BUG_ON(!list_empty(&migratepages));
-        return isolated;
-
-out:
-        put_page(page);
-        return 0;
+        return migrated;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
-- 
2.39.3

From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org
Cc: mgorman@techsingularity.net, shy828301@gmail.com, david@redhat.com,
    ying.huang@intel.com, baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
    linux-kernel@vger.kernel.org
Subject: [PATCH v2 3/4] mm: migrate: change migrate_misplaced_page() to support multiple pages migration
Date: Tue, 22 Aug 2023 08:53:51 +0800
Message-Id: <02c3d36270705f0dfec1ea583e252464cb48d802.1692665449.git.baolin.wang@linux.alibaba.com>

Expand migrate_misplaced_page() to take a list of pages so that several
pages can be migrated in one call, as a preparation for supporting batch
migration for NUMA balancing as well as compound page NUMA balancing in
the future.
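For illustration only, the new calling convention condensed from the hunks
below: callers now put the isolated page on a local list and pass the list
instead of the page itself:

        LIST_HEAD(migratepages);

        /* page has already passed numa_page_can_migrate() and been isolated. */
        list_add(&page->lru, &migratepages);
        if (migrate_misplaced_page(&migratepages, vma, page_nid, target_nid))
                flags |= TNF_MIGRATED;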
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 include/linux/migrate.h |  9 +++++----
 mm/huge_memory.c        |  5 ++++-
 mm/memory.c             |  4 +++-
 mm/migrate.c            | 26 ++++++++++----------------
 4 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ddcd62ec2c12..87edce8e939d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -142,12 +142,13 @@ const struct movable_operations *page_movable_ops(struct page *page)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-                           int node);
+int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct *vma,
+                           int source_nid, int target_nid);
 bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page);
 #else
-static inline int migrate_misplaced_page(struct page *page,
-                                         struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_page(struct list_head *migratepages,
+                                         struct vm_area_struct *vma,
+                                         int source_nid, int target_nid)
 {
         return -EAGAIN; /* can't migrate now */
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 07149ead11e4..4401a3493544 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1497,6 +1497,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         bool migrated = false, writable = false;
         int flags = 0;
         pg_data_t *pgdat;
+        LIST_HEAD(migratepages);
 
         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
         if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1552,7 +1553,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
                 goto migrate_fail;
         }
 
-        migrated = migrate_misplaced_page(page, vma, target_nid);
+        list_add(&page->lru, &migratepages);
+        migrated = migrate_misplaced_page(&migratepages, vma,
+                                          page_nid, target_nid);
         if (migrated) {
                 flags |= TNF_MIGRATED;
                 page_nid = target_nid;
diff --git a/mm/memory.c b/mm/memory.c
index 4e451b041488..9e417e8dd5d5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4770,6 +4770,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
         pte_t pte, old_pte;
         int flags = 0;
         pg_data_t *pgdat;
+        LIST_HEAD(migratepages);
 
         /*
          * The "pte" at this point cannot be used safely without
@@ -4851,8 +4852,9 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
                 goto migrate_fail;
         }
 
+        list_add(&page->lru, &migratepages);
         /* Migrate to the requested node */
-        if (migrate_misplaced_page(page, vma, target_nid)) {
+        if (migrate_misplaced_page(&migratepages, vma, page_nid, target_nid)) {
                 page_nid = target_nid;
                 flags |= TNF_MIGRATED;
         } else {
diff --git a/mm/migrate.c b/mm/migrate.c
index 0b2b69a2a7ab..fae7224b8e64 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2519,36 +2519,30 @@ bool numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-                           int node)
+int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct *vma,
+                           int source_nid, int target_nid)
 {
-        pg_data_t *pgdat = NODE_DATA(node);
+        pg_data_t *pgdat = NODE_DATA(target_nid);
         int migrated = 1;
         int nr_remaining;
         unsigned int nr_succeeded;
-        LIST_HEAD(migratepages);
-        int nr_pages = thp_nr_pages(page);
 
-        list_add(&page->lru, &migratepages);
-        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
-                                     NULL, node, MIGRATE_ASYNC,
+        nr_remaining = migrate_pages(migratepages, alloc_misplaced_dst_folio,
+                                     NULL, target_nid, MIGRATE_ASYNC,
                                      MR_NUMA_MISPLACED, &nr_succeeded);
         if (nr_remaining) {
-                if (!list_empty(&migratepages)) {
-                        list_del(&page->lru);
-                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                        page_is_file_lru(page), -nr_pages);
-                        putback_lru_page(page);
-                }
+                if (!list_empty(migratepages))
+                        putback_movable_pages(migratepages);
+
                 migrated = 0;
         }
         if (nr_succeeded) {
                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-                if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+                if (!node_is_toptier(source_nid) && node_is_toptier(target_nid))
                         mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
                                             nr_succeeded);
         }
-        BUG_ON(!list_empty(&migratepages));
+        BUG_ON(!list_empty(migratepages));
         return migrated;
 }
 #endif /* CONFIG_NUMA_BALANCING */
-- 
2.39.3

From: Baolin Wang <baolin.wang@linux.alibaba.com>
To: akpm@linux-foundation.org
Cc: mgorman@techsingularity.net, shy828301@gmail.com, david@redhat.com,
    ying.huang@intel.com, baolin.wang@linux.alibaba.com, linux-mm@kvack.org,
    linux-kernel@vger.kernel.org
Subject: [PATCH v2 4/4] mm: migrate: change to return the number of pages migrated successfully
Date: Tue, 22 Aug 2023 08:53:52 +0800
Message-Id: <9688ba40be86d7d0af0961e74d2a182ce65f5f8c.1692665449.git.baolin.wang@linux.alibaba.com>

Change migrate_misplaced_page() to return the number of pages migrated
successfully, which will be used to work out how many pages failed to
migrate for batch migration.

For compound page NUMA balancing support, it is possible that only some
of the pages are migrated successfully, so migrate_misplaced_page() needs
to report the number of pages that actually were migrated.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/huge_memory.c | 9 +++++----
 mm/memory.c      | 4 +++-
 mm/migrate.c     | 5 +----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4401a3493544..951f73d6b5bf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1494,10 +1494,11 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
         int page_nid = NUMA_NO_NODE;
         int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
-        bool migrated = false, writable = false;
+        bool writable = false;
         int flags = 0;
         pg_data_t *pgdat;
         LIST_HEAD(migratepages);
+        int nr_successed;
 
         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
         if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
@@ -1554,9 +1555,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
         }
 
         list_add(&page->lru, &migratepages);
-        migrated = migrate_misplaced_page(&migratepages, vma,
-                                          page_nid, target_nid);
-        if (migrated) {
+        nr_successed = migrate_misplaced_page(&migratepages, vma,
+                                              page_nid, target_nid);
+        if (nr_successed) {
                 flags |= TNF_MIGRATED;
                 page_nid = target_nid;
         } else {
diff --git a/mm/memory.c b/mm/memory.c
index 9e417e8dd5d5..2773cd804ee9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4771,6 +4771,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
         int flags = 0;
         pg_data_t *pgdat;
         LIST_HEAD(migratepages);
+        int nr_succeeded;
 
         /*
          * The "pte" at this point cannot be used safely without
@@ -4854,7 +4855,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 
         list_add(&page->lru, &migratepages);
         /* Migrate to the requested node */
-        if (migrate_misplaced_page(&migratepages, vma, page_nid, target_nid)) {
+        nr_succeeded = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
+        if (nr_succeeded) {
                 page_nid = target_nid;
                 flags |= TNF_MIGRATED;
         } else {
diff --git a/mm/migrate.c b/mm/migrate.c
index fae7224b8e64..5435cfb225ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2523,7 +2523,6 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
                            int source_nid, int target_nid)
 {
         pg_data_t *pgdat = NODE_DATA(target_nid);
-        int migrated = 1;
         int nr_remaining;
         unsigned int nr_succeeded;
 
@@ -2533,8 +2532,6 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
         if (nr_remaining) {
                 if (!list_empty(migratepages))
                         putback_movable_pages(migratepages);
-
-                migrated = 0;
         }
         if (nr_succeeded) {
                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
@@ -2543,7 +2540,7 @@ int migrate_misplaced_page(struct list_head *migratepages, struct vm_area_struct
                                     nr_succeeded);
         }
         BUG_ON(!list_empty(migratepages));
-        return migrated;
+        return nr_succeeded;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
-- 
2.39.3
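For illustration only, the resulting call pattern at the end of the series,
condensed from the do_numa_page() hunk in patch 4 (the return value is now a
count of successfully migrated pages rather than a boolean; the error path is
abbreviated here):

        nr_succeeded = migrate_misplaced_page(&migratepages, vma, page_nid, target_nid);
        if (nr_succeeded) {
                /* At least one page made it to the target node. */
                page_nid = target_nid;
                flags |= TNF_MIGRATED;
        } else {
                flags |= TNF_MIGRATE_FAIL;
        }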