[PATCH v1 11/29] mm/migrate: move movable_ops page handling out of move_to_new_folio()

Posted by David Hildenbrand 3 months, 1 week ago
Let's move that handling directly into migrate_folio_move(), so we can
simplify move_to_new_folio(). While at it, fixup the documentation a
bit.

Note that unmap_and_move_huge_page() does not care, because it only
deals with actual folios (we only support migration of individual
movable_ops pages).
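
The resulting control flow, condensed for illustration (locking
asserts and the post-success details are elided; the diff below is
authoritative):

        static int move_to_new_folio(struct folio *dst, struct folio *src,
                                     enum migrate_mode mode)
        {
                struct address_space *mapping = folio_mapping(src);
                int rc = -EAGAIN;

                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);
                else if (mapping_inaccessible(mapping))
                        rc = -EOPNOTSUPP;
                else if (mapping->a_ops->migrate_folio)
                        rc = mapping->a_ops->migrate_folio(mapping, dst, src, mode);
                else
                        rc = fallback_migrate_folio(mapping, dst, src, mode);
                /* on success: clear src->mapping for pagecache, flush dcache */
                return rc;
        }

and in migrate_folio_move(), before we ever get there:

                if (unlikely(__folio_test_movable(src))) {
                        rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
                        if (rc)
                                goto out;
                        goto out_unlock_both;
                }

                rc = move_to_new_folio(dst, src, mode);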

Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
 mm/migrate.c | 63 +++++++++++++++++++++++++---------------------------
 1 file changed, 30 insertions(+), 33 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 0898ddd2f661f..22c115710d0e2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1024,11 +1024,12 @@ static int fallback_migrate_folio(struct address_space *mapping,
 }
 
 /*
- * Move a page to a newly allocated page
- * The page is locked and all ptes have been successfully removed.
+ * Move a src folio to a newly allocated dst folio.
  *
- * The new page will have replaced the old page if this function
- * is successful.
+ * The src and dst folios are locked and the src folio was unmapped from
+ * the page tables.
+ *
+ * On success, the src folio was replaced by the dst folio.
  *
  * Return value:
  *   < 0 - error code
@@ -1037,34 +1038,30 @@ static int fallback_migrate_folio(struct address_space *mapping,
 static int move_to_new_folio(struct folio *dst, struct folio *src,
 				enum migrate_mode mode)
 {
+	struct address_space *mapping = folio_mapping(src);
 	int rc = -EAGAIN;
-	bool is_lru = !__folio_test_movable(src);
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
-	if (likely(is_lru)) {
-		struct address_space *mapping = folio_mapping(src);
-
-		if (!mapping)
-			rc = migrate_folio(mapping, dst, src, mode);
-		else if (mapping_inaccessible(mapping))
-			rc = -EOPNOTSUPP;
-		else if (mapping->a_ops->migrate_folio)
-			/*
-			 * Most folios have a mapping and most filesystems
-			 * provide a migrate_folio callback. Anonymous folios
-			 * are part of swap space which also has its own
-			 * migrate_folio callback. This is the most common path
-			 * for page migration.
-			 */
-			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
-								mode);
-		else
-			rc = fallback_migrate_folio(mapping, dst, src, mode);
+	if (!mapping)
+		rc = migrate_folio(mapping, dst, src, mode);
+	else if (mapping_inaccessible(mapping))
+		rc = -EOPNOTSUPP;
+	else if (mapping->a_ops->migrate_folio)
+		/*
+		 * Most folios have a mapping and most filesystems
+		 * provide a migrate_folio callback. Anonymous folios
+		 * are part of swap space which also has its own
+		 * migrate_folio callback. This is the most common path
+		 * for page migration.
+		 */
+		rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+							mode);
+	else
+		rc = fallback_migrate_folio(mapping, dst, src, mode);
 
-		if (rc != MIGRATEPAGE_SUCCESS)
-			goto out;
+	if (rc == MIGRATEPAGE_SUCCESS) {
 		/*
 		 * For pagecache folios, src->mapping must be cleared before src
 		 * is freed. Anonymous folios must stay anonymous until freed.
@@ -1074,10 +1071,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 
 		if (likely(!folio_is_zone_device(dst)))
 			flush_dcache_folio(dst);
-	} else {
-		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
 	}
-out:
 	return rc;
 }
 
@@ -1328,20 +1322,23 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	int rc;
 	int old_page_state = 0;
 	struct anon_vma *anon_vma = NULL;
-	bool is_lru = !__folio_test_movable(src);
 	struct list_head *prev;
 
 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 	prev = dst->lru.prev;
 	list_del(&dst->lru);
 
+	if (unlikely(__folio_test_movable(src))) {
+		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+		if (rc)
+			goto out;
+		goto out_unlock_both;
+	}
+
 	rc = move_to_new_folio(dst, src, mode);
 	if (rc)
 		goto out;
 
-	if (unlikely(!is_lru))
-		goto out_unlock_both;
-
 	/*
 	 * When successful, push dst to LRU immediately: so that if it
 	 * turns out to be an mlocked page, remove_migration_ptes() will
-- 
2.49.0
Re: [PATCH v1 11/29] mm/migrate: move movable_ops page handling out of move_to_new_folio()
Posted by Lorenzo Stoakes 3 months, 1 week ago
On Mon, Jun 30, 2025 at 02:59:52PM +0200, David Hildenbrand wrote:
> Let's move that handling directly into migrate_folio_move(), so we can
> simplify move_to_new_folio(). While at it, fixup the documentation a
> bit.
>
> Note that unmap_and_move_huge_page() does not care, because it only
> deals with actual folios (we only support migration of individual
> movable_ops pages).

Important caveat here :)

>
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>

LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> ---
>  mm/migrate.c | 63 +++++++++++++++++++++++++---------------------------
>  1 file changed, 30 insertions(+), 33 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 0898ddd2f661f..22c115710d0e2 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1024,11 +1024,12 @@ static int fallback_migrate_folio(struct address_space *mapping,
>  }
>
>  /*
> - * Move a page to a newly allocated page
> - * The page is locked and all ptes have been successfully removed.
> + * Move a src folio to a newly allocated dst folio.
>   *
> - * The new page will have replaced the old page if this function
> - * is successful.
> + * The src and dst folios are locked and the src folio was unmapped from
> + * the page tables.
> + *
> + * On success, the src folio was replaced by the dst folio.
>   *
>   * Return value:
>   *   < 0 - error code
> @@ -1037,34 +1038,30 @@ static int fallback_migrate_folio(struct address_space *mapping,
>  static int move_to_new_folio(struct folio *dst, struct folio *src,
>  				enum migrate_mode mode)
>  {
> +	struct address_space *mapping = folio_mapping(src);
>  	int rc = -EAGAIN;
> -	bool is_lru = !__folio_test_movable(src);

This is_lru was already sketchy, !movable_ops doesn't imply the folio is
on the LRU...
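
Roughly what the test actually checks, for anybody following along
(quoting the page-flags.h definition from memory, so double-check
there):

        static __always_inline bool __folio_test_movable(const struct folio *folio)
        {
                /* "movable_ops" iff the low ->mapping bits say PAGE_MAPPING_MOVABLE */
                return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_MOVABLE;
        }

e.g. a zone device folio fails this test as well, and that's not an LRU
folio either.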

Re: [PATCH v1 11/29] mm/migrate: move movable_ops page handling out of move_to_new_folio()
Posted by Harry Yoo 3 months, 1 week ago
On Mon, Jun 30, 2025 at 02:59:52PM +0200, David Hildenbrand wrote:
> Let's move that handling directly into migrate_folio_move(), so we can
> simplify move_to_new_folio(). While at it, fixup the documentation a
> bit.
> 
> Note that unmap_and_move_huge_page() does not care, because it only
> deals with actual folios (we only support migration of individual
> movable_ops pages).
> 
> Reviewed-by: Zi Yan <ziy@nvidia.com>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---

Looks correct to me.
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>

-- 
Cheers,
Harry / Hyeonggon