From: Gregory Price <gourry@gourry.net>

A common operation in tiering is to migrate multiple pages at once.
The migrate_misplaced_folio function requires one call for each
individual folio. Expose a batch-variant of the same call for use
when doing batch migrations.
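
For illustration, a caller might look roughly like this (hypothetical
caller context, not part of this patch; folio, vma, err and the
candidate-selection loop come from the surrounding caller):

    LIST_HEAD(folio_list);

    /* For each candidate folio chosen by the tiering policy ... */
    if (!migrate_misplaced_folio_prepare(folio, vma, node))
            list_add(&folio->lru, &folio_list);

    /* One call migrates the whole batch and empties the list. */
    err = migrate_misplaced_folios_batch(&folio_list, node);
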
Signed-off-by: Gregory Price <gourry@gourry.net>
Signed-off-by: Bharata B Rao <bharata@amd.com>
---
 include/linux/migrate.h |  6 ++++++
 mm/migrate.c            | 31 +++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index acadd41e0b5c..0593f5869be8 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -107,6 +107,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node);
 int migrate_misplaced_folio(struct folio *folio, int node);
+int migrate_misplaced_folios_batch(struct list_head *foliolist, int node);
 #else
 static inline int migrate_misplaced_folio_prepare(struct folio *folio,
 		struct vm_area_struct *vma, int node)
@@ -117,6 +118,11 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
+static inline int migrate_misplaced_folios_batch(struct list_head *foliolist,
+						 int node)
+{
+	return -EAGAIN; /* can't migrate now */
+}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_MIGRATION

diff --git a/mm/migrate.c b/mm/migrate.c
index 7e356c0b1b5a..1268a95eda0e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2714,5 +2714,36 @@ int migrate_misplaced_folio(struct folio *folio, int node)
 	BUG_ON(!list_empty(&migratepages));
 	return nr_remaining ? -EAGAIN : 0;
 }
+
+/*
+ * Batch variant of migrate_misplaced_folio. Attempts to migrate
+ * a folio list to the specified destination.
+ *
+ * Caller is expected to have isolated the folios by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on the folio.
+ *
+ * This function will un-isolate the folios, dereference them, and
+ * remove them from the list before returning.
+ */
+int migrate_misplaced_folios_batch(struct list_head *folio_list, int node)
+{
+	pg_data_t *pgdat = NODE_DATA(node);
+	unsigned int nr_succeeded;
+	int nr_remaining;
+
+	nr_remaining = migrate_pages(folio_list, alloc_misplaced_dst_folio,
+				     NULL, node, MIGRATE_ASYNC,
+				     MR_NUMA_MISPLACED, &nr_succeeded);
+	if (nr_remaining)
+		putback_movable_pages(folio_list);
+
+	if (nr_succeeded) {
+		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+		mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded);
+	}
+	BUG_ON(!list_empty(folio_list));
+	return nr_remaining ? -EAGAIN : 0;
+}
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_NUMA */
--
2.34.1

Bharata B Rao <bharata@amd.com> writes:

> From: Gregory Price <gourry@gourry.net>
>
> A common operation in tiering is to migrate multiple pages at once.

Is it common now?  If so, you can replace some callers of
migrate_misplaced_folio() with migrate_misplaced_folios_batch().

> The migrate_misplaced_folio function requires one call for each

IMHO, migrate_misplaced_folio() is more concise than "the
migrate_misplaced_folio function".

> individual folio. Expose a batch-variant of the same call for use
> when doing batch migrations.
>
> Signed-off-by: Gregory Price <gourry@gourry.net>
> Signed-off-by: Bharata B Rao <bharata@amd.com>
> ---
>  include/linux/migrate.h |  6 ++++++
>  mm/migrate.c            | 31 +++++++++++++++++++++++++++++++
>  2 files changed, 37 insertions(+)
>
> diff --git a/include/linux/migrate.h b/include/linux/migrate.h
> index acadd41e0b5c..0593f5869be8 100644
> --- a/include/linux/migrate.h
> +++ b/include/linux/migrate.h
> @@ -107,6 +107,7 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
>  int migrate_misplaced_folio_prepare(struct folio *folio,
>  		struct vm_area_struct *vma, int node);
>  int migrate_misplaced_folio(struct folio *folio, int node);
> +int migrate_misplaced_folios_batch(struct list_head *foliolist, int node);
>  #else
>  static inline int migrate_misplaced_folio_prepare(struct folio *folio,
>  		struct vm_area_struct *vma, int node)
> @@ -117,6 +118,11 @@ static inline int migrate_misplaced_folio(struct folio *folio, int node)
>  {
>  	return -EAGAIN; /* can't migrate now */
>  }
> +static inline int migrate_misplaced_folios_batch(struct list_head *foliolist,
> +						 int node)
> +{
> +	return -EAGAIN; /* can't migrate now */
> +}
>  #endif /* CONFIG_NUMA_BALANCING */
>
>  #ifdef CONFIG_MIGRATION
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 7e356c0b1b5a..1268a95eda0e 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -2714,5 +2714,36 @@ int migrate_misplaced_folio(struct folio *folio, int node)
>  	BUG_ON(!list_empty(&migratepages));
>  	return nr_remaining ? -EAGAIN : 0;
>  }
> +
> +/*
> + * Batch variant of migrate_misplaced_folio. Attempts to migrate
> + * a folio list to the specified destination.
> + *
> + * Caller is expected to have isolated the folios by calling
> + * migrate_misplaced_folio_prepare(), which will result in an
> + * elevated reference count on the folio.
> + *
> + * This function will un-isolate the folios, dereference them, and
> + * remove them from the list before returning.
> + */
> +int migrate_misplaced_folios_batch(struct list_head *folio_list, int node)

In addition to working on a list of folios instead of a single folio,
I found there are some differences in memcg counting between
migrate_misplaced_folios_batch() and migrate_misplaced_folio().  Why?
And can we merge the implementations of the two functions to reduce
code duplication?

> +{
> +	pg_data_t *pgdat = NODE_DATA(node);
> +	unsigned int nr_succeeded;
> +	int nr_remaining;
> +
> +	nr_remaining = migrate_pages(folio_list, alloc_misplaced_dst_folio,
> +				     NULL, node, MIGRATE_ASYNC,
> +				     MR_NUMA_MISPLACED, &nr_succeeded);
> +	if (nr_remaining)
> +		putback_movable_pages(folio_list);
> +
> +	if (nr_succeeded) {
> +		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
> +		mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded);
> +	}
> +	BUG_ON(!list_empty(folio_list));
> +	return nr_remaining ? -EAGAIN : 0;
> +}
>  #endif /* CONFIG_NUMA_BALANCING */
>  #endif /* CONFIG_NUMA */

--
Best Regards,
Huang, Ying