From: Gregory Price <gourry@gourry.net>

A common operation in tiering is to migrate multiple pages at once.
The migrate_misplaced_folio function requires one call for each
individual folio. Expose a batch variant of the same call for use
when doing batch migrations.
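
For example, a caller can collect isolated folios on a list and then
migrate them all with one call (illustrative fragment only; folio, nid
and the surrounding scan loop are assumed to come from the caller, and
the usual migrate_misplaced_folio_prepare() locking rules still apply):

	LIST_HEAD(migrate_list);

	/* For each misplaced folio found by the scanner ... */
	if (!migrate_misplaced_folio_prepare(folio, NULL, nid))
		list_add(&folio->lru, &migrate_list);

	/* ... then migrate the whole set at once. */
	ret = migrate_misplaced_folios_batch(&migrate_list, nid);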

Signed-off-by: Gregory Price <gourry@gourry.net>
Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 include/linux/migrate.h |  6 ++++++
 mm/migrate.c            | 31 +++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index acadd41e0b5c..0593f5869be8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -107,6 +107,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node);
 int migrate_misplaced_folio(struct folio *folio, int node);
+int migrate_misplaced_folios_batch(struct list_head *foliolist, int node);
 #else
 static inline int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node)
@@ -117,6 +118,11 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
+static inline int migrate_misplaced_folios_batch(struct list_head *foliolist,
+						 int node)
+{
+	return -EAGAIN; /* can't migrate now */
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_MIGRATION
diff --git a/mm/migrate.c b/mm/migrate.c
index 7e356c0b1b5a..1268a95eda0e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2714,5 +2714,36 @@ int migrate_misplaced_folio(struct folio *folio, int node)
 	BUG_ON(!list_empty(&migratepages));
 	return nr_remaining ? -EAGAIN : 0;
 }
+
+/*
+ * Batch variant of migrate_misplaced_folio. Attempts to migrate
+ * a folio list to the specified destination.
+ *
+ * Caller is expected to have isolated the folios by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on each folio.
+ *
+ * This function will un-isolate the folios, drop their references,
+ * and remove them from the list before returning.
+ */
+int migrate_misplaced_folios_batch(struct list_head *folio_list, int node)
+{
+	pg_data_t *pgdat = NODE_DATA(node);
+	unsigned int nr_succeeded;
+	int nr_remaining;
+
+	nr_remaining = migrate_pages(folio_list, alloc_misplaced_dst_folio,
+				     NULL, node, MIGRATE_ASYNC,
+				     MR_NUMA_MISPLACED, &nr_succeeded);
+	if (nr_remaining)
+		putback_movable_pages(folio_list);
+
+	if (nr_succeeded) {
+		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+		mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded);
+	}
+	BUG_ON(!list_empty(folio_list));
+	return nr_remaining ? -EAGAIN : 0;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
--
2.34.1
On Wed, 10 Sep 2025 20:16:47 +0530
Bharata B Rao <bharata@amd.com> wrote:
> From: Gregory Price <gourry@gourry.net>
>
> A common operation in tiering is to migrate multiple pages at once.
> The migrate_misplaced_folio function requires one call for each
> individual folio. Expose a batch variant of the same call for use
> when doing batch migrations.
>
I probably missed an earlier discussion of this but what does the
_batch postfix add over the plural (folios)?
> Signed-off-by: Gregory Price <gourry@gourry.net>
> Signed-off-by: Bharata B Rao <bharata@amd.com>
> ---
>  include/linux/migrate.h |  6 ++++++
>  mm/migrate.c            | 31 +++++++++++++++++++++++++++++++
>  2 files changed, 37 insertions(+)
> 
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index acadd41e0b5c..0593f5869be8 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -107,6 +107,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>  int migrate_misplaced_folio_prepare(struct folio *folio,
>  		struct vm_area_struct *vma, int node);
>  int migrate_misplaced_folio(struct folio *folio, int node);
> +int migrate_misplaced_folios_batch(struct list_head *foliolist, int node);
>  #else
>  static inline int migrate_misplaced_folio_prepare(struct folio *folio,
>  		struct vm_area_struct *vma, int node)
> @@ -117,6 +118,11 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node)
>  {
>  	return -EAGAIN; /* can't migrate now */
>  }
> +static inline int migrate_misplaced_folios_batch(struct list_head *foliolist,
> +						 int node)
> +{
> +	return -EAGAIN; /* can't migrate now */
> +}
>  #endif /* CONFIG_NUMA_BALANCING */
> 
>  #ifdef CONFIG_MIGRATION
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 7e356c0b1b5a..1268a95eda0e 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -2714,5 +2714,36 @@ int migrate_misplaced_folio(struct folio *folio, int node)
>  	BUG_ON(!list_empty(&migratepages));
>  	return nr_remaining ? -EAGAIN : 0;
>  }
> +
> +/*
Kernel-doc perhaps appropriate?
> + * Batch variant of migrate_misplaced_folio. Attempts to migrate
> + * a folio list to the specified destination.
> + *
> + * Caller is expected to have isolated the folios by calling
> + * migrate_misplaced_folio_prepare(), which will result in an
> + * elevated reference count on each folio.
> + *
> + * This function will un-isolate the folios, drop their references,
> + * and remove them from the list before returning.
> + */
> +int migrate_misplaced_folios_batch(struct list_head *folio_list, int node)
> +{
> +	pg_data_t *pgdat = NODE_DATA(node);
> +	unsigned int nr_succeeded;
> +	int nr_remaining;
> +
> +	nr_remaining = migrate_pages(folio_list, alloc_misplaced_dst_folio,
> +				     NULL, node, MIGRATE_ASYNC,
> +				     MR_NUMA_MISPLACED, &nr_succeeded);
> +	if (nr_remaining)
> +		putback_movable_pages(folio_list);
> +
> +	if (nr_succeeded) {
> +		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
> +		mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded);
> +	}
> +	BUG_ON(!list_empty(folio_list));
> +	return nr_remaining ? -EAGAIN : 0;
> +}
>  #endif /* CONFIG_NUMA_BALANCING */
>  #endif /* CONFIG_NUMA */
On 03-Oct-25 4:06 PM, Jonathan Cameron wrote:
> On Wed, 10 Sep 2025 20:16:47 +0530
> Bharata B Rao <bharata@amd.com> wrote:
>
>> From: Gregory Price <gourry@gourry.net>
>>
>> A common operation in tiering is to migrate multiple pages at once.
>> The migrate_misplaced_folio function requires one call for each
>> individual folio. Expose a batch variant of the same call for use
>> when doing batch migrations.
>>
> I probably missed an earlier discussion of this but what does the
> _batch postfix add over the plural (folios)?
That was discussed here:
https://lore.kernel.org/linux-mm/15744682-72ea-472f-9af1-50c3494c0b78@redhat.com/
>
>> Signed-off-by: Gregory Price <gourry@gourry.net>
>> Signed-off-by: Bharata B Rao <bharata@amd.com>
>> ---
>>  include/linux/migrate.h |  6 ++++++
>>  mm/migrate.c            | 31 +++++++++++++++++++++++++++++++
>>  2 files changed, 37 insertions(+)
>> 
>> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
>> index acadd41e0b5c..0593f5869be8 100644
>> --- a/include/linux/migrate.h
>> +++ b/include/linux/migrate.h
>> @@ -107,6 +107,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>>  int migrate_misplaced_folio_prepare(struct folio *folio,
>>  		struct vm_area_struct *vma, int node);
>>  int migrate_misplaced_folio(struct folio *folio, int node);
>> +int migrate_misplaced_folios_batch(struct list_head *foliolist, int node);
>>  #else
>>  static inline int migrate_misplaced_folio_prepare(struct folio *folio,
>>  		struct vm_area_struct *vma, int node)
>> @@ -117,6 +118,11 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node)
>>  {
>>  	return -EAGAIN; /* can't migrate now */
>>  }
>> +static inline int migrate_misplaced_folios_batch(struct list_head *foliolist,
>> +						 int node)
>> +{
>> +	return -EAGAIN; /* can't migrate now */
>> +}
>>  #endif /* CONFIG_NUMA_BALANCING */
>> 
>>  #ifdef CONFIG_MIGRATION
>> diff --git a/mm/migrate.c b/mm/migrate.c
>> index 7e356c0b1b5a..1268a95eda0e 100644
>> --- a/mm/migrate.c
>> +++ b/mm/migrate.c
>> @@ -2714,5 +2714,36 @@ int migrate_misplaced_folio(struct folio *folio, int node)
>>  	BUG_ON(!list_empty(&migratepages));
>>  	return nr_remaining ? -EAGAIN : 0;
>>  }
>> +
>> +/*
>
> Kernel-doc perhaps appropriate?
Probably yes, will take care in next iteration.
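
Something along these lines perhaps (an untested sketch, to be refined
in the next iteration):

/**
 * migrate_misplaced_folios_batch - batch variant of migrate_misplaced_folio
 * @folio_list: list of folios previously isolated with
 *		migrate_misplaced_folio_prepare()
 * @node: target node to migrate the folios to
 *
 * Attempts to migrate all folios on @folio_list to @node. The caller
 * must have isolated each folio with migrate_misplaced_folio_prepare(),
 * which leaves an elevated reference count on the folio.
 *
 * Un-isolates the folios, drops the references and removes the folios
 * from @folio_list before returning.
 *
 * Return: 0 if all folios were migrated, -EAGAIN otherwise.
 */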
Thanks for looking into this patchset.
Regards,
Bharata.