Add device-private THP support to the reverse-mapping infrastructure,
enabling proper handling of device-private entries during migration and
page table walk operations.

The key changes are:
- set_pmd_migration_entry()/remove_migration_pmd(): handle
  device-private entries during folio migration and splitting
- page_vma_mapped_walk(): recognize device-private THP entries during
  VMA traversal operations

This change supports folio splitting and migration operations on
device-private entries.
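
As an illustration, the recurring pattern the hunks below use to get the
PFN out of a PMD that may hold a non-present device-private entry is
roughly:

    pmd_t pmdval = pmdp_get(pmd);
    unsigned long pfn;

    if (likely(pmd_present(pmdval)))
        pfn = pmd_pfn(pmdval);
    else    /* non-present: device-private or migration swap entry */
        pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
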
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
---
mm/damon/ops-common.c | 20 +++++++++++++++++---
mm/huge_memory.c | 16 +++++++++++++++-
mm/page_idle.c | 7 +++++--
mm/page_vma_mapped.c | 7 +++++++
mm/rmap.c | 21 +++++++++++++++++----
5 files changed, 61 insertions(+), 10 deletions(-)
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 998c5180a603..eda4de553611 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -75,12 +75,24 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
+ pmd_t pmdval = pmdp_get(pmd);
+ struct folio *folio;
+ bool young = false;
+ unsigned long pfn;
+
+ if (likely(pmd_present(pmdval)))
+ pfn = pmd_pfn(pmdval);
+ else
+ pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));

+ folio = damon_get_folio(pfn);
if (!folio)
return;

- if (pmdp_clear_young_notify(vma, addr, pmd))
+ if (likely(pmd_present(pmdval)))
+ young |= pmdp_clear_young_notify(vma, addr, pmd);
+ young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
+ if (young)
folio_set_young(folio);

folio_set_idle(folio);
@@ -203,7 +215,9 @@ static bool damon_folio_young_one(struct folio *folio,
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
+ pmd_t pmd = pmdp_get(pvmw.pmd);
+
+ *accessed = (pmd_present(pmd) && pmd_young(pmd)) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
#else
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a5e4c2aef191..78166db72f4d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4637,7 +4637,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
return 0;

flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
- pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+ if (unlikely(!pmd_present(*pvmw->pmd)))
+ pmdval = pmdp_huge_get_and_clear(vma->vm_mm, address, pvmw->pmd);
+ else
+ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

/* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
@@ -4687,6 +4690,17 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
entry = pmd_to_swp_entry(*pvmw->pmd);
folio_get(folio);
pmde = folio_mk_pmd(folio, READ_ONCE(vma->vm_page_prot));
+
+ if (folio_is_device_private(folio)) {
+ if (pmd_write(pmde))
+ entry = make_writable_device_private_entry(
+ page_to_pfn(new));
+ else
+ entry = make_readable_device_private_entry(
+ page_to_pfn(new));
+ pmde = swp_entry_to_pmd(entry);
+ }
+
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
diff --git a/mm/page_idle.c b/mm/page_idle.c
index a82b340dc204..3bf0fbe05cc2 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -71,8 +71,11 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
- if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
- referenced = true;
+ pmd_t pmdval = pmdp_get(pvmw.pmd);
+
+ if (likely(pmd_present(pmdval)))
+ referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
+ referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
} else {
/* unexpected pmd-mapped page? */
WARN_ON_ONCE(1);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e981a1a292d2..159953c590cc 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -277,6 +277,13 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
* cannot return prematurely, while zap_huge_pmd() has
* cleared *pmd but not decremented compound_mapcount().
*/
+ swp_entry_t entry = pmd_to_swp_entry(pmde);
+
+ if (is_device_private_entry(entry)) {
+ pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+ return true;
+ }
+
if ((pvmw->flags & PVMW_SYNC) &&
thp_vma_suitable_order(vma, pvmw->address,
PMD_ORDER) &&
diff --git a/mm/rmap.c b/mm/rmap.c
index 9a2aabfaea6f..080fc4048431 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1063,9 +1063,11 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t *pmd = pvmw->pmd;
- pmd_t entry;
+ pmd_t entry = pmdp_get(pmd);

- if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
+ if (!pmd_present(entry))
+ continue;
+ if (!pmd_dirty(entry) && !pmd_write(entry))
continue;

flush_cache_range(vma, address,
@@ -2330,6 +2332,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
while (page_vma_mapped_walk(&pvmw)) {
/* PMD-mapped THP migration entry */
if (!pvmw.pte) {
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ unsigned long pfn;
+ pmd_t pmdval;
+#endif
+
if (flags & TTU_SPLIT_HUGE_PMD) {
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
@@ -2338,8 +2345,14 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
break;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
- subpage = folio_page(folio,
- pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
+ pmdval = pmdp_get(pvmw.pmd);
+ if (likely(pmd_present(pmdval)))
+ pfn = pmd_pfn(pmdval);
+ else
+ pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
+
+ subpage = folio_page(folio, pfn - folio_pfn(folio));
+
VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
!folio_test_pmd_mappable(folio), folio);

--
2.50.1
On 16 Sep 2025, at 8:21, Balbir Singh wrote:
> [...]
>
> diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
> index 998c5180a603..eda4de553611 100644
> --- a/mm/damon/ops-common.c
> +++ b/mm/damon/ops-common.c
> @@ -75,12 +75,24 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
> void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
> {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> - struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
> + pmd_t pmdval = pmdp_get(pmd);
> + struct folio *folio;
> + bool young = false;
> + unsigned long pfn;
> +
> + if (likely(pmd_present(pmdval)))
> + pfn = pmd_pfn(pmdval);
> + else
> + pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));
>
> + folio = damon_get_folio(pfn);
> if (!folio)
> return;
>
> - if (pmdp_clear_young_notify(vma, addr, pmd))
> + if (likely(pmd_present(pmdval)))
> + young |= pmdp_clear_young_notify(vma, addr, pmd);
> + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
This should be HPAGE_PMD_SIZE (the code is guarded by CONFIG_TRANSPARENT_HUGEPAGE,
so HPAGE_PMD_SIZE will not trigger a build bug like the one below).
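That is, roughly:

    young |= mmu_notifier_clear_young(vma->vm_mm, addr,
                                      addr + HPAGE_PMD_SIZE);
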
> + if (young)
> folio_set_young(folio);
>
> folio_set_idle(folio);
> [...]
> diff --git a/mm/page_idle.c b/mm/page_idle.c
> index a82b340dc204..3bf0fbe05cc2 100644
> --- a/mm/page_idle.c
> +++ b/mm/page_idle.c
> @@ -71,8 +71,11 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
> referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
> referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> - if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
> - referenced = true;
> + pmd_t pmdval = pmdp_get(pvmw.pmd);
> +
> + if (likely(pmd_present(pmdval)))
> + referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
> + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
This should be HPAGE_PMD_SIZE, or rather PMD_SIZE: this code is not compiled
out when CONFIG_TRANSPARENT_HUGEPAGE is not selected, and HPAGE_PMD_SIZE
would then cause a build bug when CONFIG_PGTABLE_HAS_HUGE_LEAVES is not
selected.
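For example, the safer variant would be roughly:

    referenced |= mmu_notifier_clear_young(vma->vm_mm, addr,
                                           addr + PMD_SIZE);
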
> } else {
> /* unexpected pmd-mapped page? */
> WARN_ON_ONCE(1);
> [...]
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 9a2aabfaea6f..080fc4048431 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1063,9 +1063,11 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
> } else {
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> pmd_t *pmd = pvmw->pmd;
> - pmd_t entry;
> + pmd_t entry = pmdp_get(pmd);
>
> - if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
It is better to add a comment similar to the one above the !pte_present()
check. Something like:
PFN swap PMDs, such as ...
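For example (suggested wording only, to be adjusted):

    /*
     * PFN swap PMDs, such as device-private entries, have no CPU
     * dirty state to clean here; skip them.
     */
    if (!pmd_present(entry))
        continue;
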
> + if (!pmd_present(entry))
> + continue;
> + if (!pmd_dirty(entry) && !pmd_write(entry))
> continue;
>
> flush_cache_range(vma, address,
> @@ -2330,6 +2332,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
> while (page_vma_mapped_walk(&pvmw)) {
> /* PMD-mapped THP migration entry */
> if (!pvmw.pte) {
> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
> + unsigned long pfn;
> + pmd_t pmdval;
> +#endif
> +
This looks ugly. IIRC, we can now put variable definitions in the middle
of a block. Maybe, for this case, these two can be moved into the #ifdef
region below.
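That is, something like:

    #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
            unsigned long pfn;
            pmd_t pmdval = pmdp_get(pvmw.pmd);

            if (likely(pmd_present(pmdval)))
                    pfn = pmd_pfn(pmdval);
            else
                    pfn = swp_offset_pfn(pmd_to_swp_entry(pmdval));

            subpage = folio_page(folio, pfn - folio_pfn(folio));
            ...
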
> [...]

Otherwise, LGTM. Acked-by: Zi Yan <ziy@nvidia.com>
Best Regards,
Yan, Zi
On 9/23/25 06:13, Zi Yan wrote:
> On 16 Sep 2025, at 8:21, Balbir Singh wrote:
>
>> [...]
>> + if (likely(pmd_present(pmdval)))
>> + young |= pmdp_clear_young_notify(vma, addr, pmd);
>> + young |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
>
> This should be HPAGE_PMD_SIZE (it is guarded in CONFIG_TRANSPARENT_HUGEPAGE,
> so HPAGE_PMD_SIZE will not trigger a build bug like the one below).
>
>> [...]
>> diff --git a/mm/page_idle.c b/mm/page_idle.c
>> index a82b340dc204..3bf0fbe05cc2 100644
>> --- a/mm/page_idle.c
>> +++ b/mm/page_idle.c
>> @@ -71,8 +71,11 @@ static bool page_idle_clear_pte_refs_one(struct folio *folio,
>> referenced |= ptep_test_and_clear_young(vma, addr, pvmw.pte);
>> referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
>> } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
>> - if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
>> - referenced = true;
>> + pmd_t pmdval = pmdp_get(pvmw.pmd);
>> +
>> + if (likely(pmd_present(pmdval)))
>> + referenced |= pmdp_clear_young_notify(vma, addr, pvmw.pmd);
>> + referenced |= mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE);
>
> This should be HPAGE_PMD_SIZE, or rather PMD_SIZE: this code is not compiled
> out when CONFIG_TRANSPARENT_HUGEPAGE is not selected, and HPAGE_PMD_SIZE
> would then cause a build bug when CONFIG_PGTABLE_HAS_HUGE_LEAVES is not selected.
I'll protect it accordingly, thanks!
>
>> [...]
>> } else {
>> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> pmd_t *pmd = pvmw->pmd;
>> - pmd_t entry;
>> + pmd_t entry = pmdp_get(pmd);
>>
>> - if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
>
> It is better to add a comment similar to the one above the !pte_present()
> check. Something like:
> PFN swap PMDs, such as ...
>
>
Sure, I can do that and either repeat the comment or just refer to the
comments above the !pte_present() check :)
>> [...]
>> @@ -2330,6 +2332,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
>> while (page_vma_mapped_walk(&pvmw)) {
>> /* PMD-mapped THP migration entry */
>> if (!pvmw.pte) {
>> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
>> + unsigned long pfn;
>> + pmd_t pmdval;
>> +#endif
>> +
>
> This looks ugly. IIRC, we can now put variable definitions in the middle
> of a block. Maybe, for this case, these two can be moved into the #ifdef
> region below.
>
I can't find any examples of mixed declarations and code, and could not
find any clear guidance in the coding-style documentation.
>> [...]
>
> Otherwise, LGTM. Acked-by: Zi Yan <ziy@nvidia.com>
Thanks for the review,
Balbir
>>> [...]
>>> +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
>>> + unsigned long pfn;
>>> + pmd_t pmdval;
>>> +#endif
>>> +
>>
>> This looks ugly. IIRC, we can now put variable definitions in the middle
>> of a block. Maybe, for this case, these two can be moved into the #ifdef
>> region below.
>>
>
> I can't find any examples of mixed declarations and code, and could not
> find any clear guidance in the coding-style documentation.
Rather not do it :)
__maybe_unused might help avoid the #ifdef.
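
For example, keeping the declarations unconditional (sketch only):

    unsigned long pfn __maybe_unused;
    pmd_t pmdval __maybe_unused;

so the #ifdef/#endif around the declarations can go away.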
--
Cheers
David / dhildenb