mm/migrate.c | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-)
These flags only track folio-specific state during migration and are
not used for movable_ops pages. Rename the enum values and the
old_page_state variable to match.
No functional change.
Suggested-by: David Hildenbrand <david@kernel.org>
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
Applies cleanly on mm-new
Ref: https://lore.kernel.org/linux-mm/4bfec2c1-9747-49d2-a490-87ab204cec1c@kernel.org
mm/migrate.c | 46 +++++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 05cb408846f2..a1856321ae27 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1135,26 +1135,26 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
* This is safe because nobody is using it except us.
*/
enum {
- PAGE_WAS_MAPPED = BIT(0),
- PAGE_WAS_MLOCKED = BIT(1),
- PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
+ FOLIO_MF_WAS_MAPPED = BIT(0),
+ FOLIO_MF_WAS_MLOCKED = BIT(1),
+ FOLIO_MF_OLD_STATES = FOLIO_MF_WAS_MAPPED | FOLIO_MF_WAS_MLOCKED,
};
static void __migrate_folio_record(struct folio *dst,
- int old_page_state,
+ int old_folio_state,
struct anon_vma *anon_vma)
{
- dst->private = (void *)anon_vma + old_page_state;
+ dst->private = (void *)anon_vma + old_folio_state;
}
static void __migrate_folio_extract(struct folio *dst,
- int *old_page_state,
+ int *old_folio_state,
struct anon_vma **anon_vmap)
{
unsigned long private = (unsigned long)dst->private;
- *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
- *old_page_state = private & PAGE_OLD_STATES;
+ *anon_vmap = (struct anon_vma *)(private & ~FOLIO_MF_OLD_STATES);
+ *old_folio_state = private & FOLIO_MF_OLD_STATES;
dst->private = NULL;
}
@@ -1209,7 +1209,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
{
struct folio *dst;
int rc = -EAGAIN;
- int old_page_state = 0;
+ int old_folio_state = 0;
struct anon_vma *anon_vma = NULL;
bool locked = false;
bool dst_locked = false;
@@ -1253,7 +1253,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
}
locked = true;
if (folio_test_mlocked(src))
- old_page_state |= PAGE_WAS_MLOCKED;
+ old_folio_state |= FOLIO_MF_WAS_MLOCKED;
if (folio_test_writeback(src)) {
/*
@@ -1302,7 +1302,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
dst_locked = true;
if (unlikely(page_has_movable_ops(&src->page))) {
- __migrate_folio_record(dst, old_page_state, anon_vma);
+ __migrate_folio_record(dst, old_folio_state, anon_vma);
return 0;
}
@@ -1328,11 +1328,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
VM_BUG_ON_FOLIO(folio_test_anon(src) &&
!folio_test_ksm(src) && !anon_vma, src);
try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
- old_page_state |= PAGE_WAS_MAPPED;
+ old_folio_state |= FOLIO_MF_WAS_MAPPED;
}
if (!folio_mapped(src)) {
- __migrate_folio_record(dst, old_page_state, anon_vma);
+ __migrate_folio_record(dst, old_folio_state, anon_vma);
return 0;
}
@@ -1344,7 +1344,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
if (rc == -EAGAIN)
ret = NULL;
- migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+ migrate_folio_undo_src(src, old_folio_state & FOLIO_MF_WAS_MAPPED,
anon_vma, locked, ret);
migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
@@ -1358,13 +1358,13 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
struct list_head *ret)
{
int rc;
- int old_page_state = 0;
+ int old_folio_state = 0;
struct anon_vma *anon_vma = NULL;
bool src_deferred_split = false;
bool src_partially_mapped = false;
struct list_head *prev;
- __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+ __migrate_folio_extract(dst, &old_folio_state, &anon_vma);
prev = dst->lru.prev;
list_del(&dst->lru);
@@ -1395,10 +1395,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
* isolated from the unevictable LRU: but this case is the easiest.
*/
folio_add_lru(dst);
- if (old_page_state & PAGE_WAS_MLOCKED)
+ if (old_folio_state & FOLIO_MF_WAS_MLOCKED)
lru_add_drain();
- if (old_page_state & PAGE_WAS_MAPPED)
+ if (old_folio_state & FOLIO_MF_WAS_MAPPED)
remove_migration_ptes(src, dst, 0);
/*
@@ -1439,11 +1439,11 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
*/
if (rc == -EAGAIN) {
list_add(&dst->lru, prev);
- __migrate_folio_record(dst, old_page_state, anon_vma);
+ __migrate_folio_record(dst, old_folio_state, anon_vma);
return rc;
}
- migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+ migrate_folio_undo_src(src, old_folio_state & FOLIO_MF_WAS_MAPPED,
anon_vma, true, ret);
migrate_folio_undo_dst(dst, true, put_new_folio, private);
@@ -1777,11 +1777,11 @@ static void migrate_folios_undo(struct list_head *src_folios,
dst = list_first_entry(dst_folios, struct folio, lru);
dst2 = list_next_entry(dst, lru);
list_for_each_entry_safe(folio, folio2, src_folios, lru) {
- int old_page_state = 0;
+ int old_folio_state = 0;
struct anon_vma *anon_vma = NULL;
- __migrate_folio_extract(dst, &old_page_state, &anon_vma);
- migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+ __migrate_folio_extract(dst, &old_folio_state, &anon_vma);
+ migrate_folio_undo_src(folio, old_folio_state & FOLIO_MF_WAS_MAPPED,
anon_vma, true, ret_folios);
list_del(&dst->lru);
migrate_folio_undo_dst(dst, true, put_new_folio, private);
--
2.43.0
On Mon, Mar 23, 2026 at 02:19:37PM +0000, Shivank Garg wrote:
> +++ b/mm/migrate.c
> @@ -1135,26 +1135,26 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
> * This is safe because nobody is using it except us.
> */
> enum {
> - PAGE_WAS_MAPPED = BIT(0),
> - PAGE_WAS_MLOCKED = BIT(1),
> - PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
> + FOLIO_MF_WAS_MAPPED = BIT(0),
> + FOLIO_MF_WAS_MLOCKED = BIT(1),
> + FOLIO_MF_OLD_STATES = FOLIO_MF_WAS_MAPPED | FOLIO_MF_WAS_MLOCKED,
what does 'MF' stand for? i mean, presumably not the pop culture
meaning of MF ...
> locked = true;
> if (folio_test_mlocked(src))
> - old_page_state |= PAGE_WAS_MLOCKED;
> + old_folio_state |= FOLIO_MF_WAS_MLOCKED;
I mean, this doesn't make sense any more. PAGE_WAS_MLOCKED is a
sentence that means something. FOLIO_MF_WAS_MLOCKED doesn't ... unless
you imagine Samuel L Jackson saying it ...
On 3/23/26 15:46, Matthew Wilcox wrote:
> On Mon, Mar 23, 2026 at 02:19:37PM +0000, Shivank Garg wrote:
>> +++ b/mm/migrate.c
>> @@ -1135,26 +1135,26 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
>> * This is safe because nobody is using it except us.
>> */
>> enum {
>> - PAGE_WAS_MAPPED = BIT(0),
>> - PAGE_WAS_MLOCKED = BIT(1),
>> - PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
>> + FOLIO_MF_WAS_MAPPED = BIT(0),
>> + FOLIO_MF_WAS_MLOCKED = BIT(1),
>> + FOLIO_MF_OLD_STATES = FOLIO_MF_WAS_MAPPED | FOLIO_MF_WAS_MLOCKED,
>
> what does 'MF' stand for? i mean, presumably not the pop culture
> meaning of MF ...
Migration Flag
>
>> locked = true;
>> if (folio_test_mlocked(src))
>> - old_page_state |= PAGE_WAS_MLOCKED;
>> + old_folio_state |= FOLIO_MF_WAS_MLOCKED;
>
> I mean, this doesn't make sense any more. PAGE_WAS_MLOCKED is a
> sentence that means something. FOLIO_MF_WAS_MLOCKED doesn't ... unless
> you imagine Samuel L Jackson saying it ...
:)
PAGE_WAS_MLOCKED was suboptimal given that the names overlap with things
like PAGE_SHARED or even PAGE_IS_PRESENT.
So using some prefix to indicate that these flags logically belong
together makes sense, even if the result is not a beautiful sentence.
FOLIO_MIGRATION_* could be done, but "FOLIO_MIGRATION_WAS_MLOCKED"
does not read well either, and FOLIO_MIGRATION_FLAG_* is a mouthful.
--
Cheers,
David
© 2016 - 2026 Red Hat, Inc.