From: Byungchul Park <byungchul@sk.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kernel_team@skhynix.com, akpm@linux-foundation.org, ying.huang@intel.com,
    vernhao@tencent.com, mgorman@techsingularity.net, hughd@google.com,
    willy@infradead.org, david@redhat.com, peterz@infradead.org,
    luto@kernel.org, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de,
    dave.hansen@linux.intel.com, rjgolo@gmail.com
Subject: [PATCH v11 10/12] mm: separate move/undo parts from migrate_pages_batch()
Date: Fri, 31 May 2024 18:19:59 +0900
Message-Id: <20240531092001.30428-11-byungchul@sk.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20240531092001.30428-1-byungchul@sk.com>
References: <20240531092001.30428-1-byungchul@sk.com>

Functionally, no change. This is a preparation for the luf mechanism,
which needs to use separate folio lists for its own handling during
migration. Refactor migrate_pages_batch() so as to separate the move
and undo parts into their own helpers.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 mm/migrate.c | 134 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 83 insertions(+), 51 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index e04b451c4289..6c22a1402923 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1584,6 +1584,81 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 	return nr_failed;
 }
 
+static void migrate_folios_move(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		enum migrate_mode mode, int reason,
+		struct list_head *ret_folios,
+		struct migrate_pages_stats *stats,
+		int *retry, int *thp_retry, int *nr_failed,
+		int *nr_retry_pages)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+	bool is_thp;
+	int nr_pages;
+	int rc;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+		nr_pages = folio_nr_pages(folio);
+
+		cond_resched();
+
+		rc = migrate_folio_move(put_new_folio, private,
+				folio, dst, mode,
+				reason, ret_folios);
+		/*
+		 * The rules are:
+		 *	Success: folio will be freed
+		 *	-EAGAIN: stay on the unmap_folios list
+		 *	Other errno: put on ret_folios list
+		 */
+		switch(rc) {
+		case -EAGAIN:
+			*retry += 1;
+			*thp_retry += is_thp;
+			*nr_retry_pages += nr_pages;
+			break;
+		case MIGRATEPAGE_SUCCESS:
+			stats->nr_succeeded += nr_pages;
+			stats->nr_thp_succeeded += is_thp;
+			break;
+		default:
+			*nr_failed += 1;
+			stats->nr_thp_failed += is_thp;
+			stats->nr_failed_pages += nr_pages;
+			break;
+		}
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
+static void migrate_folios_undo(struct list_head *src_folios,
+		struct list_head *dst_folios,
+		free_folio_t put_new_folio, unsigned long private,
+		struct list_head *ret_folios)
+{
+	struct folio *folio, *folio2, *dst, *dst2;
+
+	dst = list_first_entry(dst_folios, struct folio, lru);
+	dst2 = list_next_entry(dst, lru);
+	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
+		int old_page_state = 0;
+		struct anon_vma *anon_vma = NULL;
+
+		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+				anon_vma, true, ret_folios);
+		list_del(&dst->lru);
+		migrate_folio_undo_dst(dst, true, put_new_folio, private);
+		dst = dst2;
+		dst2 = list_next_entry(dst, lru);
+	}
+}
+
 /*
  * migrate_pages_batch() first unmaps folios in the from list as many as
  * possible, then move the unmapped folios.
@@ -1606,7 +1681,7 @@ static int migrate_pages_batch(struct list_head *from,
 	int pass = 0;
 	bool is_thp = false;
 	bool is_large = false;
-	struct folio *folio, *folio2, *dst = NULL, *dst2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, rc_saved = 0, nr_pages;
 	LIST_HEAD(unmap_folios);
 	LIST_HEAD(dst_folios);
@@ -1765,42 +1840,11 @@ static int migrate_pages_batch(struct list_head *from,
 		thp_retry = 0;
 		nr_retry_pages = 0;
 
-		dst = list_first_entry(&dst_folios, struct folio, lru);
-		dst2 = list_next_entry(dst, lru);
-		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
-			nr_pages = folio_nr_pages(folio);
-
-			cond_resched();
-
-			rc = migrate_folio_move(put_new_folio, private,
-						folio, dst, mode,
-						reason, ret_folios);
-			/*
-			 * The rules are:
-			 *	Success: folio will be freed
-			 *	-EAGAIN: stay on the unmap_folios list
-			 *	Other errno: put on ret_folios list
-			 */
-			switch(rc) {
-			case -EAGAIN:
-				retry++;
-				thp_retry += is_thp;
-				nr_retry_pages += nr_pages;
-				break;
-			case MIGRATEPAGE_SUCCESS:
-				stats->nr_succeeded += nr_pages;
-				stats->nr_thp_succeeded += is_thp;
-				break;
-			default:
-				nr_failed++;
-				stats->nr_thp_failed += is_thp;
-				stats->nr_failed_pages += nr_pages;
-				break;
-			}
-			dst = dst2;
-			dst2 = list_next_entry(dst, lru);
-		}
+		/* Move the unmapped folios */
+		migrate_folios_move(&unmap_folios, &dst_folios,
+				put_new_folio, private, mode, reason,
+				ret_folios, stats, &retry, &thp_retry,
+				&nr_failed, &nr_retry_pages);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
@@ -1809,20 +1853,8 @@ static int migrate_pages_batch(struct list_head *from,
 	rc = rc_saved ? : nr_failed;
 out:
 	/* Cleanup remaining folios */
-	dst = list_first_entry(&dst_folios, struct folio, lru);
-	dst2 = list_next_entry(dst, lru);
-	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-		int old_page_state = 0;
-		struct anon_vma *anon_vma = NULL;
-
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
-				       anon_vma, true, ret_folios);
-		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-		dst = dst2;
-		dst2 = list_next_entry(dst, lru);
-	}
+	migrate_folios_undo(&unmap_folios, &dst_folios,
+			put_new_folio, private, ret_folios);
 
 	return rc;
 }
-- 
2.17.1
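
For readers unfamiliar with the structure being factored out: migrate_pages_batch()
keeps the not-yet-moved source folios on unmap_folios and their allocated
destinations on dst_folios, and the two lists are walked in lockstep so that each
source is paired with its destination. The sketch below is a minimal userspace
illustration of that lockstep move/undo pattern; it is not kernel code and not part
of this patch, and struct node, move_pairs(), and undo_pairs() are invented
stand-ins for the kernel's folio lists and the new helpers.

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Walk both lists in lockstep and "move" each src node into its paired dst. */
static void move_pairs(struct node *src, struct node *dst)
{
	while (src && dst) {
		printf("move src %d -> dst %d\n", src->id, dst->id);
		src = src->next;
		dst = dst->next;
	}
}

/* Walk both lists in lockstep and roll back whatever pairs were prepared. */
static void undo_pairs(struct node *src, struct node *dst)
{
	while (src && dst) {
		printf("undo src %d, release dst %d\n", src->id, dst->id);
		src = src->next;
		dst = dst->next;
	}
}

int main(void)
{
	/* Three source entries, each paired positionally with a destination. */
	struct node src[3] = { { 0, &src[1] }, { 1, &src[2] }, { 2, NULL } };
	struct node dst[3] = { { 100, &dst[1] }, { 101, &dst[2] }, { 102, NULL } };

	move_pairs(src, dst);	/* normal path */
	undo_pairs(src, dst);	/* cleanup path another caller could reuse */
	return 0;
}

Splitting the move and undo walks out into migrate_folios_move() and
migrate_folios_undo() keeps this pairing logic in one place, so a later caller
(such as the luf path mentioned in the changelog) could drive the same helpers
over folio lists of its own.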