Add routines to support allocation of large order zone device folios,
and helper functions for zone device folios to check whether a folio
is device private and to set zone device data.

When large folios are used, the existing page_free() callback in
pgmap is called when the folio is freed; this holds for both
PAGE_SIZE and higher order folios.

Zone device private large folios do not support deferred splitting
and scanning like normal THP folios.
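
As a rough illustration (not part of this patch: dmirror_devmem_alloc()
and dmirror_devmem_free() below are made-up stand-ins for a driver's
own device memory allocator), a driver would pair the new init routine
with its page_free() callback roughly as follows:

	/* Sketch: allocate a contiguous device range as one large folio. */
	static struct folio *dmirror_alloc_device_folio(unsigned int order)
	{
		unsigned long pfn = dmirror_devmem_alloc(1 << order);
		struct folio *folio = page_folio(pfn_to_page(pfn));

		/* Takes 1 << order pgmap refs, sets refcount to 1, locks the folio. */
		zone_device_folio_init(folio, order);
		return folio;
	}

	/* Sketch: the page_free() callback is handed the head page. */
	static void dmirror_page_free(struct page *page)
	{
		struct folio *folio = page_folio(page);

		dmirror_devmem_free(page_to_pfn(page), folio_nr_pages(folio));
	}
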
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
---
 include/linux/memremap.h | 10 +++++++++-
 mm/memremap.c            | 38 +++++++++++++++++++++++++-------------
 mm/rmap.c                |  6 +++++-
 3 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 4aa151914eab..a0723b35eeaa 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -199,7 +199,7 @@ static inline bool folio_is_fsdax(const struct folio *folio)
 }
 
 #ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
+void zone_device_folio_init(struct folio *folio, unsigned int order);
 void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -209,6 +209,14 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
 
 unsigned long memremap_compat_align(void);
+
+static inline void zone_device_page_init(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+
+	zone_device_folio_init(folio, 0);
+}
+
 #else
 static inline void *devm_memremap_pages(struct device *dev,
 		struct dev_pagemap *pgmap)
diff --git a/mm/memremap.c b/mm/memremap.c
index b0ce0d8254bd..13e87dd743ad 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -427,20 +427,19 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
 void free_zone_device_folio(struct folio *folio)
 {
 	struct dev_pagemap *pgmap = folio->pgmap;
+	unsigned long nr = folio_nr_pages(folio);
+	int i;
 
 	if (WARN_ON_ONCE(!pgmap))
 		return;
 
 	mem_cgroup_uncharge(folio);
 
-	/*
-	 * Note: we don't expect anonymous compound pages yet. Once supported
-	 * and we could PTE-map them similar to THP, we'd have to clear
-	 * PG_anon_exclusive on all tail pages.
-	 */
 	if (folio_test_anon(folio)) {
-		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-		__ClearPageAnonExclusive(folio_page(folio, 0));
+		for (i = 0; i < nr; i++)
+			__ClearPageAnonExclusive(folio_page(folio, i));
+	} else {
+		VM_WARN_ON_ONCE(folio_test_large(folio));
 	}
 
 	/*
@@ -464,11 +463,15 @@ void free_zone_device_folio(struct folio *folio)
 
 	switch (pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
+		percpu_ref_put_many(&folio->pgmap->ref, nr);
+		pgmap->ops->page_free(&folio->page);
+		folio->page.mapping = NULL;
+		break;
 	case MEMORY_DEVICE_COHERENT:
 		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
 			break;
-		pgmap->ops->page_free(folio_page(folio, 0));
-		put_dev_pagemap(pgmap);
+		pgmap->ops->page_free(&folio->page);
+		percpu_ref_put(&folio->pgmap->ref);
 		break;
 
 	case MEMORY_DEVICE_GENERIC:
@@ -491,14 +494,23 @@ void free_zone_device_folio(struct folio *folio)
 	}
 }
 
-void zone_device_page_init(struct page *page)
+void zone_device_folio_init(struct folio *folio, unsigned int order)
 {
+	struct page *page = folio_page(folio, 0);
+
+	VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES);
+
 	/*
 	 * Drivers shouldn't be allocating pages after calling
 	 * memunmap_pages().
 	 */
-	WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
-	set_page_count(page, 1);
+	WARN_ON_ONCE(!percpu_ref_tryget_many(&page_pgmap(page)->ref, 1 << order));
+	folio_set_count(folio, 1);
 	lock_page(page);
+
+	if (order > 1) {
+		prep_compound_page(page, order);
+		folio_set_large_rmappable(folio);
+	}
 }
-EXPORT_SYMBOL_GPL(zone_device_page_init);
+EXPORT_SYMBOL_GPL(zone_device_folio_init);
diff --git a/mm/rmap.c b/mm/rmap.c
index 568198e9efc2..b5837075b6e0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1769,9 +1769,13 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 	 * the folio is unmapped and at least one page is still mapped.
 	 *
 	 * Check partially_mapped first to ensure it is a large folio.
+	 *
+	 * Device private folios do not support deferred splitting and
+	 * shrinker based scanning of the folios to free.
 	 */
 	if (partially_mapped && folio_test_anon(folio) &&
-	    !folio_test_partially_mapped(folio))
+	    !folio_test_partially_mapped(folio) &&
+		!folio_is_device_private(folio))
 		deferred_split_folio(folio, true);
 
 	__folio_mod_stat(folio, -nr, -nr_pmdmapped);
--
2.50.1

On 12.08.25 04:40, Balbir Singh wrote:
> Add routines to support allocation of large order zone device folios,
> and helper functions for zone device folios to check whether a folio
> is device private and to set zone device data.
>
> When large folios are used, the existing page_free() callback in
> pgmap is called when the folio is freed; this holds for both
> PAGE_SIZE and higher order folios.
>
> Zone device private large folios do not support deferred splitting
> and scanning like normal THP folios.

[...]

>  #else
>  static inline void *devm_memremap_pages(struct device *dev,
>  		struct dev_pagemap *pgmap)
> diff --git a/mm/memremap.c b/mm/memremap.c
> index b0ce0d8254bd..13e87dd743ad 100644
> --- a/mm/memremap.c
> +++ b/mm/memremap.c
> @@ -427,20 +427,19 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
>  void free_zone_device_folio(struct folio *folio)
>  {
>  	struct dev_pagemap *pgmap = folio->pgmap;
> +	unsigned long nr = folio_nr_pages(folio);
> +	int i;

Not that it will currently matter much, but

	unsigned long i, nr = folio_nr_pages(folio);

might be more consistent.

>
>  	if (WARN_ON_ONCE(!pgmap))
>  		return;
>
>  	mem_cgroup_uncharge(folio);
>
> -	/*
> -	 * Note: we don't expect anonymous compound pages yet. Once supported
> -	 * and we could PTE-map them similar to THP, we'd have to clear
> -	 * PG_anon_exclusive on all tail pages.
> -	 */
>  	if (folio_test_anon(folio)) {
> -		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
> -		__ClearPageAnonExclusive(folio_page(folio, 0));
> +		for (i = 0; i < nr; i++)
> +			__ClearPageAnonExclusive(folio_page(folio, i));
> +	} else {
> +		VM_WARN_ON_ONCE(folio_test_large(folio));
>  	}
>
>  	/*
> @@ -464,11 +463,15 @@ void free_zone_device_folio(struct folio *folio)
>
>  	switch (pgmap->type) {
>  	case MEMORY_DEVICE_PRIVATE:

Why are you effectively dropping the

	if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))

check?

> +		percpu_ref_put_many(&folio->pgmap->ref, nr);
> +		pgmap->ops->page_free(&folio->page);
> +		folio->page.mapping = NULL;

Why are we adding this here? Does not seem large-folio specific.

> +		break;
>  	case MEMORY_DEVICE_COHERENT:
>  		if (WARN_ON_ONCE(!pgmap->ops || !pgmap->ops->page_free))
>  			break;
> -		pgmap->ops->page_free(folio_page(folio, 0));
> -		put_dev_pagemap(pgmap);
> +		pgmap->ops->page_free(&folio->page);
> +		percpu_ref_put(&folio->pgmap->ref);

This looks like an independent change that does not belong in this
patch. Can't you just leave the code as is and simply convert
percpu_ref_put to percpu_ref_put_many()? What am I missing?

>  		break;
>
>  	case MEMORY_DEVICE_GENERIC:
> @@ -491,14 +494,23 @@ void free_zone_device_folio(struct folio *folio)
>  	}
>  }
>
> -void zone_device_page_init(struct page *page)
> +void zone_device_folio_init(struct folio *folio, unsigned int order)
>  {
> +	struct page *page = folio_page(folio, 0);
> +
> +	VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES);

order vs. pages is wrong. In the context of [1], this should probably be

	VM_WARN_ON_ONCE(order > MAX_FOLIO_ORDER);

And until that lands:

	VM_WARN_ON_ONCE((1u << order) > MAX_FOLIO_NR_PAGES);

because we don't involve the buddy, so likely buddy limits do not apply.

[1] https://lore.kernel.org/all/20250821200701.1329277-1-david@redhat.com/

> +
>  	/*
>  	 * Drivers shouldn't be allocating pages after calling
>  	 * memunmap_pages().
>  	 */
> -	WARN_ON_ONCE(!percpu_ref_tryget_live(&page_pgmap(page)->ref));
> -	set_page_count(page, 1);
> +	WARN_ON_ONCE(!percpu_ref_tryget_many(&page_pgmap(page)->ref, 1 << order));
> +	folio_set_count(folio, 1);
>  	lock_page(page);
> +
> +	if (order > 1) {
> +		prep_compound_page(page, order);
> +		folio_set_large_rmappable(folio);
> +	}
>  }
> -EXPORT_SYMBOL_GPL(zone_device_page_init);
> +EXPORT_SYMBOL_GPL(zone_device_folio_init);
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 568198e9efc2..b5837075b6e0 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1769,9 +1769,13 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
>  	 * the folio is unmapped and at least one page is still mapped.
>  	 *
>  	 * Check partially_mapped first to ensure it is a large folio.
> +	 *
> +	 * Device private folios do not support deferred splitting and
> +	 * shrinker based scanning of the folios to free.
>  	 */
>  	if (partially_mapped && folio_test_anon(folio) &&
> -	    !folio_test_partially_mapped(folio))
> +	    !folio_test_partially_mapped(folio) &&
> +		!folio_is_device_private(folio))

Please indent like the previous line:

	if (partially_mapped && folio_test_anon(folio) &&
	    !folio_test_partially_mapped(folio) &&
	    !folio_is_device_private(folio))

>  		deferred_split_folio(folio, true);
>
>  	__folio_mod_stat(folio, -nr, -nr_pmdmapped);

-- 
Cheers

David / dhildenb