[RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios

Shivank Garg posted 6 patches 3 weeks, 3 days ago
[RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Posted by Shivank Garg 3 weeks, 3 days ago
Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
When set, __migrate_folio() skips folio_mc_copy() and performs
metadata-only migration. All callers currently pass
already_copied=false. The batch-copy path enables it in a later patch.

Move the dst->private state enum earlier in the file so
__migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.

Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 21 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 1bf2cf8c44dd..1d8c1fb627c9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 }
 EXPORT_SYMBOL(folio_migrate_flags);
 
+/*
+ * To record some information during migration, we use unused private
+ * field of struct folio of the newly allocated destination folio.
+ * This is safe because nobody is using it except us.
+ */
+enum {
+	PAGE_WAS_MAPPED = BIT(0),
+	PAGE_WAS_MLOCKED = BIT(1),
+	PAGE_ALREADY_COPIED = BIT(2),
+	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
+};
+
 /************************************************************
  *                    Migration functions
  ***********************************************************/
@@ -857,14 +869,20 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
 			   enum migrate_mode mode)
 {
 	int rc, expected_count = folio_expected_ref_count(src) + 1;
+	bool already_copied = ((unsigned long)dst->private & PAGE_ALREADY_COPIED);
+
+	if (already_copied)
+		dst->private = NULL;
 
 	/* Check whether src does not have extra refs before we do more work */
 	if (folio_ref_count(src) != expected_count)
 		return -EAGAIN;
 
-	rc = folio_mc_copy(dst, src);
-	if (unlikely(rc))
-		return rc;
+	if (!already_copied) {
+		rc = folio_mc_copy(dst, src);
+		if (unlikely(rc))
+			return rc;
+	}
 
 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
 	if (rc)
@@ -1088,7 +1106,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
  *     0 - success
  */
 static int move_to_new_folio(struct folio *dst, struct folio *src,
-				enum migrate_mode mode)
+		enum migrate_mode mode, bool already_copied)
 {
 	struct address_space *mapping = folio_mapping(src);
 	int rc = -EAGAIN;
@@ -1096,6 +1114,9 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
+	if (already_copied)
+		dst->private = (void *)(unsigned long)PAGE_ALREADY_COPIED;
+
 	if (!mapping)
 		rc = migrate_folio(mapping, dst, src, mode);
 	else if (mapping_inaccessible(mapping))
@@ -1127,17 +1148,6 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }
 
-/*
- * To record some information during migration, we use unused private
- * field of struct folio of the newly allocated destination folio.
- * This is safe because nobody is using it except us.
- */
-enum {
-	PAGE_WAS_MAPPED = BIT(0),
-	PAGE_WAS_MLOCKED = BIT(1),
-	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
-};
-
 static void __migrate_folio_record(struct folio *dst,
 				   int old_page_state,
 				   struct anon_vma *anon_vma)
@@ -1353,7 +1363,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 			      struct folio *src, struct folio *dst,
 			      enum migrate_mode mode, enum migrate_reason reason,
-			      struct list_head *ret)
+			      struct list_head *ret, bool already_copied)
 {
 	int rc;
 	int old_page_state = 0;
@@ -1371,7 +1381,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 		goto out_unlock_both;
 	}
 
-	rc = move_to_new_folio(dst, src, mode);
+	rc = move_to_new_folio(dst, src, mode, already_copied);
 	if (rc)
 		goto out;
 
@@ -1519,7 +1529,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	}
 
 	if (!folio_mapped(src))
-		rc = move_to_new_folio(dst, src, mode);
+		rc = move_to_new_folio(dst, src, mode, false);
 
 	if (page_was_mapped)
 		remove_migration_ptes(src, !rc ? dst : src, ttu);
@@ -1703,7 +1713,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 		struct list_head *ret_folios,
 		struct migrate_pages_stats *stats,
 		int *retry, int *thp_retry, int *nr_failed,
-		int *nr_retry_pages)
+		int *nr_retry_pages, bool already_copied)
 {
 	struct folio *folio, *folio2, *dst, *dst2;
 	bool is_thp;
@@ -1720,7 +1730,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 
 		rc = migrate_folio_move(put_new_folio, private,
 				folio, dst, mode,
-				reason, ret_folios);
+				reason, ret_folios, already_copied);
 		/*
 		 * The rules are:
 		 *	0: folio will be freed
@@ -1977,7 +1987,7 @@ static int migrate_pages_batch(struct list_head *from,
 		migrate_folios_move(&unmap_folios, &dst_folios,
 				put_new_folio, private, mode, reason,
 				ret_folios, stats, &retry, &thp_retry,
-				&nr_failed, &nr_retry_pages);
+				&nr_failed, &nr_retry_pages, false);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
-- 
2.43.0
Re: [RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Posted by Huang, Ying 1 week, 2 days ago
Shivank Garg <shivankg@amd.com> writes:

> Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
> When set, __migrate_folio() skips folio_mc_copy() and performs
> metadata-only migration. All callers currently pass
> already_copied=false. The batch-copy path enables it in a later patch.
>
> Move the dst->private state enum earlier in the file so
> __migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.
>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> ---
>  mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
>  1 file changed, 31 insertions(+), 21 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1bf2cf8c44dd..1d8c1fb627c9 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>  }
>  EXPORT_SYMBOL(folio_migrate_flags);
>  
> +/*
> + * To record some information during migration, we use unused private
> + * field of struct folio of the newly allocated destination folio.
> + * This is safe because nobody is using it except us.
> + */
> +enum {
> +	PAGE_WAS_MAPPED = BIT(0),
> +	PAGE_WAS_MLOCKED = BIT(1),
> +	PAGE_ALREADY_COPIED = BIT(2),
> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
> +};
> +
>  /************************************************************
>   *                    Migration functions
>   ***********************************************************/
> @@ -857,14 +869,20 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
>  			   enum migrate_mode mode)
>  {
>  	int rc, expected_count = folio_expected_ref_count(src) + 1;
> +	bool already_copied = ((unsigned long)dst->private & PAGE_ALREADY_COPIED);
> +
> +	if (already_copied)
> +		dst->private = NULL;
>  
>  	/* Check whether src does not have extra refs before we do more work */
>  	if (folio_ref_count(src) != expected_count)
>  		return -EAGAIN;
>  
> -	rc = folio_mc_copy(dst, src);
> -	if (unlikely(rc))
> -		return rc;
> +	if (!already_copied) {
> +		rc = folio_mc_copy(dst, src);
> +		if (unlikely(rc))
> +			return rc;
> +	}
>  
>  	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
>  	if (rc)
> @@ -1088,7 +1106,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
>   *     0 - success
>   */
>  static int move_to_new_folio(struct folio *dst, struct folio *src,
> -				enum migrate_mode mode)
> +		enum migrate_mode mode, bool already_copied)
>  {
>  	struct address_space *mapping = folio_mapping(src);
>  	int rc = -EAGAIN;
> @@ -1096,6 +1114,9 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
>  	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
>  
> +	if (already_copied)
> +		dst->private = (void *)(unsigned long)PAGE_ALREADY_COPIED;
> +

IMHO, this appears to be an unusual way to pass arguments to a function.
Why not adjust the parameters of migrate_folio()?  How about turning enum
migrate_mode into a bitmask (migrate_flags)?

>  	if (!mapping)
>  		rc = migrate_folio(mapping, dst, src, mode);
>  	else if (mapping_inaccessible(mapping))
> @@ -1127,17 +1148,6 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>  	return rc;
>  }
>  
> -/*
> - * To record some information during migration, we use unused private
> - * field of struct folio of the newly allocated destination folio.
> - * This is safe because nobody is using it except us.
> - */
> -enum {
> -	PAGE_WAS_MAPPED = BIT(0),
> -	PAGE_WAS_MLOCKED = BIT(1),
> -	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
> -};
> -
>  static void __migrate_folio_record(struct folio *dst,
>  				   int old_page_state,
>  				   struct anon_vma *anon_vma)
> @@ -1353,7 +1363,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
>  static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>  			      struct folio *src, struct folio *dst,
>  			      enum migrate_mode mode, enum migrate_reason reason,
> -			      struct list_head *ret)
> +			      struct list_head *ret, bool already_copied)
>  {
>  	int rc;
>  	int old_page_state = 0;
> @@ -1371,7 +1381,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
>  		goto out_unlock_both;
>  	}
>  
> -	rc = move_to_new_folio(dst, src, mode);
> +	rc = move_to_new_folio(dst, src, mode, already_copied);
>  	if (rc)
>  		goto out;
>  
> @@ -1519,7 +1529,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
>  	}
>  
>  	if (!folio_mapped(src))
> -		rc = move_to_new_folio(dst, src, mode);
> +		rc = move_to_new_folio(dst, src, mode, false);
>  
>  	if (page_was_mapped)
>  		remove_migration_ptes(src, !rc ? dst : src, ttu);
> @@ -1703,7 +1713,7 @@ static void migrate_folios_move(struct list_head *src_folios,
>  		struct list_head *ret_folios,
>  		struct migrate_pages_stats *stats,
>  		int *retry, int *thp_retry, int *nr_failed,
> -		int *nr_retry_pages)
> +		int *nr_retry_pages, bool already_copied)
>  {
>  	struct folio *folio, *folio2, *dst, *dst2;
>  	bool is_thp;
> @@ -1720,7 +1730,7 @@ static void migrate_folios_move(struct list_head *src_folios,
>  
>  		rc = migrate_folio_move(put_new_folio, private,
>  				folio, dst, mode,
> -				reason, ret_folios);
> +				reason, ret_folios, already_copied);
>  		/*
>  		 * The rules are:
>  		 *	0: folio will be freed
> @@ -1977,7 +1987,7 @@ static int migrate_pages_batch(struct list_head *from,
>  		migrate_folios_move(&unmap_folios, &dst_folios,
>  				put_new_folio, private, mode, reason,
>  				ret_folios, stats, &retry, &thp_retry,
> -				&nr_failed, &nr_retry_pages);
> +				&nr_failed, &nr_retry_pages, false);
>  	}
>  	nr_failed += retry;
>  	stats->nr_thp_failed += thp_retry;

---
Best Regards,
Huang, Ying
Re: [RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Posted by David Hildenbrand (Arm) 3 weeks ago
On 3/9/26 13:07, Shivank Garg wrote:
> Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
> When set, __migrate_folio() skips folio_mc_copy() and performs
> metadata-only migration. All callers currently pass
> already_copied=false. The batch-copy path enables it in a later patch.
> 
> Move the dst->private state enum earlier in the file so
> __migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.
> 
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> ---
>  mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
>  1 file changed, 31 insertions(+), 21 deletions(-)
> 
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1bf2cf8c44dd..1d8c1fb627c9 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>  }
>  EXPORT_SYMBOL(folio_migrate_flags);
>  
> +/*
> + * To record some information during migration, we use unused private
> + * field of struct folio of the newly allocated destination folio.
> + * This is safe because nobody is using it except us.
> + */
> +enum {
> +	PAGE_WAS_MAPPED = BIT(0),
> +	PAGE_WAS_MLOCKED = BIT(1),
> +	PAGE_ALREADY_COPIED = BIT(2),
> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,

All these states really only apply to proper folios (not movable_ops).
So once we complete decoupling movable_ops migration from folio
migration, these flags would only appear in the folio migration part.

Can we convert them first to state it clearly already that these are
folio migration flags?

FOLIO_MF_WAS_MAPPED

...

-- 
Cheers,

David
Re: [RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Posted by Garg, Shivank 2 weeks, 3 days ago

On 3/12/2026 3:14 PM, David Hildenbrand (Arm) wrote:
> On 3/9/26 13:07, Shivank Garg wrote:
>> Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
>> When set, __migrate_folio() skips folio_mc_copy() and performs
>> metadata-only migration. All callers currently pass
>> already_copied=false. The batch-copy path enables it in a later patch.
>>
>> Move the dst->private state enum earlier in the file so
>> __migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.
>>
>> Signed-off-by: Shivank Garg <shivankg@amd.com>
>> ---
>>  mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
>>  1 file changed, 31 insertions(+), 21 deletions(-)
>>
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 1bf2cf8c44dd..1d8c1fb627c9 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>>  }
>>  EXPORT_SYMBOL(folio_migrate_flags);
>>  
>> +/*
>> + * To record some information during migration, we use unused private
>> + * field of struct folio of the newly allocated destination folio.
>> + * This is safe because nobody is using it except us.
>> + */
>> +enum {
>> +	PAGE_WAS_MAPPED = BIT(0),
>> +	PAGE_WAS_MLOCKED = BIT(1),
>> +	PAGE_ALREADY_COPIED = BIT(2),
>> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
> 
> All these states really only apply to proper folios (not movable_ops).
> So once we complete decoupling movable_ops migration from folio
> migration, these flags would only appear in the folio migration part.
> 
> Can we convert them first to state it clearly already that these are
> folio migration flags?
> 
> FOLIO_MF_WAS_MAPPED
> 
> ...
> 
Sure, done.

Should I fold it into the series? Or send it as an independent patch, since this series
would likely take a few more rounds of reviews and discussion?



Subject: [PATCH] mm/migrate: rename PAGE_ migration flags to FOLIO_MF_

These flags only track folio-specific state during migration and are
not used for movable_ops pages. Rename the enum values and the
old_page_state variable to match.

No functional change.

Suggested-by: David Hildenbrand <david@kernel.org>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
 mm/migrate.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index 1bf2cf8c44dd..8c9115cc4586 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1133,26 +1133,26 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
  * This is safe because nobody is using it except us.
  */
 enum {
-	PAGE_WAS_MAPPED = BIT(0),
-	PAGE_WAS_MLOCKED = BIT(1),
-	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+	FOLIO_MF_WAS_MAPPED = BIT(0),
+	FOLIO_MF_WAS_MLOCKED = BIT(1),
+	FOLIO_MF_OLD_STATES = FOLIO_MF_WAS_MAPPED | FOLIO_MF_WAS_MLOCKED,
 };
 
 static void __migrate_folio_record(struct folio *dst,
-				   int old_page_state,
+				   int old_folio_state,
 				   struct anon_vma *anon_vma)
 {
-	dst->private = (void *)anon_vma + old_page_state;
+	dst->private = (void *)anon_vma + old_folio_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-				   int *old_page_state,
+				   int *old_folio_state,
 				   struct anon_vma **anon_vmap)
 {
 	unsigned long private = (unsigned long)dst->private;
 
-	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
-	*old_page_state = private & PAGE_OLD_STATES;
+	*anon_vmap = (struct anon_vma *)(private & ~FOLIO_MF_OLD_STATES);
+	*old_folio_state = private & FOLIO_MF_OLD_STATES;
 	dst->private = NULL;
 }
 
@@ -1207,7 +1207,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
 	struct folio *dst;
 	int rc = -EAGAIN;
-	int old_page_state = 0;
+	int old_folio_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool locked = false;
 	bool dst_locked = false;
@@ -1251,7 +1251,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	}
 	locked = true;
 	if (folio_test_mlocked(src))
-		old_page_state |= PAGE_WAS_MLOCKED;
+		old_folio_state |= FOLIO_MF_WAS_MLOCKED;
 
 	if (folio_test_writeback(src)) {
 		/*
@@ -1300,7 +1300,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	dst_locked = true;
 
 	if (unlikely(page_has_movable_ops(&src->page))) {
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return 0;
 	}
 
@@ -1326,11 +1326,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
 			       !folio_test_ksm(src) && !anon_vma, src);
 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-		old_page_state |= PAGE_WAS_MAPPED;
+		old_folio_state |= FOLIO_MF_WAS_MAPPED;
 	}
 
 	if (!folio_mapped(src)) {
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return 0;
 	}
 
@@ -1342,7 +1342,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 	if (rc == -EAGAIN)
 		ret = NULL;
 
-	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+	migrate_folio_undo_src(src, old_folio_state & FOLIO_MF_WAS_MAPPED,
 			       anon_vma, locked, ret);
 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
@@ -1356,11 +1356,11 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 			      struct list_head *ret)
 {
 	int rc;
-	int old_page_state = 0;
+	int old_folio_state = 0;
 	struct anon_vma *anon_vma = NULL;
 	struct list_head *prev;
 
-	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
+	__migrate_folio_extract(dst, &old_folio_state, &anon_vma);
 	prev = dst->lru.prev;
 	list_del(&dst->lru);
 
@@ -1385,10 +1385,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
 	folio_add_lru(dst);
-	if (old_page_state & PAGE_WAS_MLOCKED)
+	if (old_folio_state & FOLIO_MF_WAS_MLOCKED)
 		lru_add_drain();
 
-	if (old_page_state & PAGE_WAS_MAPPED)
+	if (old_folio_state & FOLIO_MF_WAS_MAPPED)
 		remove_migration_ptes(src, dst, 0);
 
 out_unlock_both:
@@ -1420,11 +1420,11 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 	 */
 	if (rc == -EAGAIN) {
 		list_add(&dst->lru, prev);
-		__migrate_folio_record(dst, old_page_state, anon_vma);
+		__migrate_folio_record(dst, old_folio_state, anon_vma);
 		return rc;
 	}
 
-	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+	migrate_folio_undo_src(src, old_folio_state & FOLIO_MF_WAS_MAPPED,
 			       anon_vma, true, ret);
 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
@@ -1758,11 +1758,11 @@ static void migrate_folios_undo(struct list_head *src_folios,
 	dst = list_first_entry(dst_folios, struct folio, lru);
 	dst2 = list_next_entry(dst, lru);
 	list_for_each_entry_safe(folio, folio2, src_folios, lru) {
-		int old_page_state = 0;
+		int old_folio_state = 0;
 		struct anon_vma *anon_vma = NULL;
 
-		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
-		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+		__migrate_folio_extract(dst, &old_folio_state, &anon_vma);
+		migrate_folio_undo_src(folio, old_folio_state & FOLIO_MF_WAS_MAPPED,
 				anon_vma, true, ret_folios);
 		list_del(&dst->lru);
 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
-- 

Thanks,
Shivank
Re: [RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
Posted by David Hildenbrand (Arm) 1 week, 3 days ago
On 3/15/26 19:25, Garg, Shivank wrote:
> 
> 
> On 3/12/2026 3:14 PM, David Hildenbrand (Arm) wrote:
>> On 3/9/26 13:07, Shivank Garg wrote:
>>> Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
>>> When set, __migrate_folio() skips folio_mc_copy() and performs
>>> metadata-only migration. All callers currently pass
>>> already_copied=false. The batch-copy path enables it in a later patch.
>>>
>>> Move the dst->private state enum earlier in the file so
>>> __migrate_folio() and move_to_new_folio() can see PAGE_ALREADY_COPIED.
>>>
>>> Signed-off-by: Shivank Garg <shivankg@amd.com>
>>> ---
>>>  mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
>>>  1 file changed, 31 insertions(+), 21 deletions(-)
>>>
>>> diff --git a/mm/migrate.c b/mm/migrate.c
>>> index 1bf2cf8c44dd..1d8c1fb627c9 100644
>>> --- a/mm/migrate.c
>>> +++ b/mm/migrate.c
>>> @@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
>>>  }
>>>  EXPORT_SYMBOL(folio_migrate_flags);
>>>  
>>> +/*
>>> + * To record some information during migration, we use unused private
>>> + * field of struct folio of the newly allocated destination folio.
>>> + * This is safe because nobody is using it except us.
>>> + */
>>> +enum {
>>> +	PAGE_WAS_MAPPED = BIT(0),
>>> +	PAGE_WAS_MLOCKED = BIT(1),
>>> +	PAGE_ALREADY_COPIED = BIT(2),
>>> +	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
>>
>> All these states really only apply to proper folios (not movable_ops).
>> So once we complete decoupling movable_ops migration from folio
>> migration, these flags would only appear in the folio migration part.
>>
>> Can we convert them first to state it clearly already that these are
>> folio migration flags?
>>
>> FOLIO_MF_WAS_MAPPED
>>
>> ...
>>
> Sure, done.
> 
> Should I fold it into the series? Or send it as independent patch as this series would
> likely take few more rounds of reviews and discussion.

Best to send it out as a standalone cleanup :)

-- 
Cheers,

David