From: Zi Yan <ziy@nvidia.com>
No functional change is intended. The factored-out code will be reused in
an upcoming batched folio move function.
Signed-off-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 mm/migrate.c | 106 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 67 insertions(+), 39 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 9e5ef39ce73a..ad03e7257847 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1061,19 +1061,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
 	return migrate_folio(mapping, dst, src, mode);
 }
 
-/*
- * Move a src folio to a newly allocated dst folio.
- *
- * The src and dst folios are locked and the src folios was unmapped from
- * the page tables.
- *
- * On success, the src folio was replaced by the dst folio.
- *
- * Return value:
- *   < 0 - error code
- *  MIGRATEPAGE_SUCCESS - success
- */
-static int move_to_new_folio(struct folio *dst, struct folio *src,
+static int _move_to_new_folio_prep(struct folio *dst, struct folio *src,
 				enum migrate_mode mode)
 {
 	struct address_space *mapping = folio_mapping(src);
@@ -1098,7 +1086,12 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 					mode);
 	else
 		rc = fallback_migrate_folio(mapping, dst, src, mode);
+	return rc;
+}
 
+static void _move_to_new_folio_finalize(struct folio *dst, struct folio *src,
+					int rc)
+{
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		/*
 		 * For pagecache folios, src->mapping must be cleared before src
@@ -1110,6 +1103,29 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 		if (likely(!folio_is_zone_device(dst)))
 			flush_dcache_folio(dst);
 	}
+}
+
+/*
+ * Move a src folio to a newly allocated dst folio.
+ *
+ * The src and dst folios are locked and the src folios was unmapped from
+ * the page tables.
+ *
+ * On success, the src folio was replaced by the dst folio.
+ *
+ * Return value:
+ *   < 0 - error code
+ *  MIGRATEPAGE_SUCCESS - success
+ */
+static int move_to_new_folio(struct folio *dst, struct folio *src,
+			     enum migrate_mode mode)
+{
+	int rc;
+
+	rc = _move_to_new_folio_prep(dst, src, mode);
+
+	_move_to_new_folio_finalize(dst, src, rc);
+
 	return rc;
 }
 
@@ -1345,32 +1361,9 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	return rc;
 }
 
-/* Migrate the folio to the newly allocated folio in dst. */
-static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
-			      struct folio *src, struct folio *dst,
-			      enum migrate_mode mode, enum migrate_reason reason,
-			      struct list_head *ret)
+static void _migrate_folio_move_finalize1(struct folio *src, struct folio *dst,
+					  int old_page_state)
 {
-	int rc;
-	int old_page_state = 0;
-	struct anon_vma *anon_vma = NULL;
-	struct list_head *prev;
-
-	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-	prev = dst->lru.prev;
-	list_del(&dst->lru);
-
-	if (unlikely(page_has_movable_ops(&src->page))) {
-		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
-		if (rc)
-			goto out;
-		goto out_unlock_both;
-	}
-
-	rc = move_to_new_folio(dst, src, mode);
-	if (rc)
-		goto out;
-
 	/*
 	 * When successful, push dst to LRU immediately: so that if it
 	 * turns out to be an mlocked page, remove_migration_ptes() will
@@ -1386,8 +1379,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 
 	if (old_page_state & PAGE_WAS_MAPPED)
 		remove_migration_ptes(src, dst, 0);
+}
 
-out_unlock_both:
+static void _migrate_folio_move_finalize2(struct folio *src, struct folio *dst,
+					  enum migrate_reason reason,
+					  struct anon_vma *anon_vma)
+{
 	folio_unlock(dst);
 	folio_set_owner_migrate_reason(dst, reason);
 	/*
@@ -1407,6 +1404,37 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	put_anon_vma(anon_vma);
 	folio_unlock(src);
 	migrate_folio_done(src, reason);
+}
+
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
+			      struct folio *src, struct folio *dst,
+			      enum migrate_mode mode, enum migrate_reason reason,
+			      struct list_head *ret)
+{
+	int rc;
+	int old_page_state = 0;
+	struct anon_vma *anon_vma = NULL;
+	struct list_head *prev;
+
+	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+	prev = dst->lru.prev;
+	list_del(&dst->lru);
+
+	if (unlikely(page_has_movable_ops(&src->page))) {
+		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
+		if (rc)
+			goto out;
+		goto out_unlock_both;
+	}
+
+	rc = move_to_new_folio(dst, src, mode);
+	if (rc)
+		goto out;
+
+	_migrate_folio_move_finalize1(src, dst, old_page_state);
+out_unlock_both:
+	_migrate_folio_move_finalize2(src, dst, reason, anon_vma);
 
 	return rc;
 out:
--
2.43.0
On Tue, 23 Sep 2025 17:47:36 +0000
Shivank Garg <shivankg@amd.com> wrote:
> From: Zi Yan <ziy@nvidia.com>
>
> No functional change is intended. The factored-out code will be reused in
> an upcoming batched folio move function.
>
> Signed-off-by: Zi Yan <ziy@nvidia.com>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
Hi. A few code-structure things inline.

The naming of the various helpers needs some more thought, I think; as
it stands, the loss of readability in the existing code is painful.

Jonathan
> ---
>  mm/migrate.c | 106 ++++++++++++++++++++++++++++++++-------------------
>  1 file changed, 67 insertions(+), 39 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 9e5ef39ce73a..ad03e7257847 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1061,19 +1061,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
>  	return migrate_folio(mapping, dst, src, mode);
>  }
>
> -/*
> - * Move a src folio to a newly allocated dst folio.
> - *
> - * The src and dst folios are locked and the src folios was unmapped from
> - * the page tables.
> - *
> - * On success, the src folio was replaced by the dst folio.
> - *
> - * Return value:
> - *   < 0 - error code
> - *  MIGRATEPAGE_SUCCESS - success
> - */
> -static int move_to_new_folio(struct folio *dst, struct folio *src,
> +static int _move_to_new_folio_prep(struct folio *dst, struct folio *src,
I'm not sure the _ prefix is needed. Or maybe it should be __, as in
__buffer_migrate_folio().
>  				enum migrate_mode mode)
>  {
>  	struct address_space *mapping = folio_mapping(src);
> @@ -1098,7 +1086,12 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  					mode);
>  	else
>  		rc = fallback_migrate_folio(mapping, dst, src, mode);
> +	return rc;
It may be worth switching this whole function to early returns, given
that we no longer have a shared block of work at the end:

	if (!mapping)
		return migrate_folio(mapping, dst, src, mode);

	if (mapping_inaccessible(mapping))
		return -EOPNOTSUPP;

	if (mapping->a_ops->migrate_folio)
		return mapping->a_ops->migrate_folio(mapping, dst, src, mode);

	return fallback_migrate_folio(mapping, dst, src, mode);
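
(That would also let the rc local go away in the prep helper, along with
its initialization; if I'm reading the surrounding context right it
currently starts life as -EAGAIN and every branch then overwrites it.)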
> +}
>
> +static void _move_to_new_folio_finalize(struct folio *dst, struct folio *src,
> +					int rc)
> +{
>  	if (rc == MIGRATEPAGE_SUCCESS) {
Perhaps

	if (rc != MIGRATEPAGE_SUCCESS)
		return;

	/*
	 * For pagecache folios,....
	...

(the helper is void, so there is no rc to return at the end).

Unless other stuff is likely to get added in here.
Or drag the condition to the caller.
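i.e. something like this in the move_to_new_folio() wrapper (just a
sketch; _move_to_new_folio_finalize() would then no longer take rc):

	rc = _move_to_new_folio_prep(dst, src, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		_move_to_new_folio_finalize(dst, src);

	return rc;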
>  		/*
>  		 * For pagecache folios, src->mapping must be cleared before src
> @@ -1110,6 +1103,29 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  		if (likely(!folio_is_zone_device(dst)))
>  			flush_dcache_folio(dst);
>  	}
> +}
> +
> +/*
> + * Move a src folio to a newly allocated dst folio.
> + *
> + * The src and dst folios are locked and the src folios was unmapped from
> + * the page tables.
> + *
> + * On success, the src folio was replaced by the dst folio.
> + *
> + * Return value:
> + *   < 0 - error code
> + *  MIGRATEPAGE_SUCCESS - success
> + */
> +static int move_to_new_folio(struct folio *dst, struct folio *src,
> +			     enum migrate_mode mode)
> +{
> +	int rc;
> +
> +	rc = _move_to_new_folio_prep(dst, src, mode);
> +
> +	_move_to_new_folio_finalize(dst, src, rc);
> +
>  	return rc;
>  }
>
> @@ -1345,32 +1361,9 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
>  	return rc;
>  }
>
> -/* Migrate the folio to the newly allocated folio in dst. */
> -static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
> -			      struct folio *src, struct folio *dst,
> -			      enum migrate_mode mode, enum migrate_reason reason,
> -			      struct list_head *ret)
> +static void _migrate_folio_move_finalize1(struct folio *src, struct folio *dst,
> +					  int old_page_state)
>  {
> -	int rc;
> -	int old_page_state = 0;
> -	struct anon_vma *anon_vma = NULL;
> -	struct list_head *prev;
> -
> -	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> -	prev = dst->lru.prev;
> -	list_del(&dst->lru);
> -
> -	if (unlikely(page_has_movable_ops(&src->page))) {
> -		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
> -		if (rc)
> -			goto out;
> -		goto out_unlock_both;
> -	}
> -
> -	rc = move_to_new_folio(dst, src, mode);
> -	if (rc)
> -		goto out;
> -
>  	/*
>  	 * When successful, push dst to LRU immediately: so that if it
>  	 * turns out to be an mlocked page, remove_migration_ptes() will
> @@ -1386,8 +1379,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>
>  	if (old_page_state & PAGE_WAS_MAPPED)
>  		remove_migration_ptes(src, dst, 0);
> +}
>
> -out_unlock_both:
> +static void _migrate_folio_move_finalize2(struct folio *src, struct folio *dst,
> +					  enum migrate_reason reason,
> +					  struct anon_vma *anon_vma)
> +{
>  	folio_unlock(dst);
>  	folio_set_owner_migrate_reason(dst, reason);
>  	/*
> @@ -1407,6 +1404,37 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>  	put_anon_vma(anon_vma);
>  	folio_unlock(src);
>  	migrate_folio_done(src, reason);
> +}
> +
> +/* Migrate the folio to the newly allocated folio in dst. */
> +static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
> +			      struct folio *src, struct folio *dst,
> +			      enum migrate_mode mode, enum migrate_reason reason,
> +			      struct list_head *ret)
> +{
> +	int rc;
> +	int old_page_state = 0;
> +	struct anon_vma *anon_vma = NULL;
> +	struct list_head *prev;
> +
> +	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
> +	prev = dst->lru.prev;
> +	list_del(&dst->lru);
> +
> +	if (unlikely(page_has_movable_ops(&src->page))) {
> +		rc = migrate_movable_ops_page(&dst->page, &src->page, mode);
> +		if (rc)
> +			goto out;
> +		goto out_unlock_both;
I would drop this...
> +	}
and do

	} else {
		rc = move_to_new_folio(dst, src, mode);
		if (rc)
			goto out;

		_migrate_folio_move_finalize1(src, dst, old_page_state);
	}
	_migrate_folio_move_finalize2(src, dst, reason, anon_vma);

	return rc;
This makes sense now because the amount of code that picks up an extra
level of indentation with this approach is much smaller than it would
have been before you factored things out.
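(As a bonus, the out_unlock_both label and the goto that jumps to it go
away entirely.)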
> +
> +	rc = move_to_new_folio(dst, src, mode);
> +	if (rc)
> +		goto out;
> +
Hmm. These two helpers might be useful, but as named they hurt
readability here. Can we come up with some more meaningful names,
perhaps?
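Strawman suggestions only, named after what each helper actually does:
the first one puts dst back on the LRU and restores the migration PTEs,
the second unlocks both folios and completes the migration. So perhaps
something like

	migrate_folio_move_restore_mappings(src, dst, old_page_state);
	migrate_folio_move_unlock_done(src, dst, reason, anon_vma);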
> +	_migrate_folio_move_finalize1(src, dst, old_page_state);
> +out_unlock_both:
> +	_migrate_folio_move_finalize2(src, dst, reason, anon_vma);
>
>  	return rc;
>  out: