We do not need to have explicit helper functions for these; it adds a level
of confusion and indirection when we can simply use software leaf entry
logic here instead and spell out the special huge_pte_none() case we must
consider.
No functional change intended.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
fs/proc/task_mmu.c | 19 +++++----
include/linux/hugetlb.h | 2 -
mm/hugetlb.c | 91 +++++++++++++++++------------------------
mm/mempolicy.c | 17 +++++---
mm/migrate.c | 15 +++++--
5 files changed, 69 insertions(+), 75 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6cb9e1691e18..3cdefa7546db 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2499,22 +2499,23 @@ static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t ptent)
{
- unsigned long psize;
+ const unsigned long psize = huge_page_size(hstate_vma(vma));
+ softleaf_t entry;
- if (is_hugetlb_entry_hwpoisoned(ptent) || pte_is_marker(ptent))
- return;
+ if (huge_pte_none(ptent))
+ set_huge_pte_at(vma->vm_mm, addr, ptep,
+ make_pte_marker(PTE_MARKER_UFFD_WP), psize);
- psize = huge_page_size(hstate_vma(vma));
+ entry = softleaf_from_pte(ptent);
+ if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
+ return;
- if (is_hugetlb_entry_migration(ptent))
+ if (softleaf_is_migration(entry))
set_huge_pte_at(vma->vm_mm, addr, ptep,
pte_swp_mkuffd_wp(ptent), psize);
- else if (!huge_pte_none(ptent))
+ else
huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
huge_pte_mkuffd_wp(ptent));
- else
- set_huge_pte_at(vma->vm_mm, addr, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP), psize);
}
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2387513d6ae5..457d48ac7bcd 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -274,8 +274,6 @@ void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot,
unsigned long cp_flags);
-bool is_hugetlb_entry_migration(pte_t pte);
-bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
void fixup_hugetlb_reservations(struct vm_area_struct *vma);
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a74cde267c2a..b702b161ab35 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5552,32 +5552,6 @@ static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
set_huge_ptep_writable(vma, address, ptep);
}
-bool is_hugetlb_entry_migration(pte_t pte)
-{
- swp_entry_t swp;
-
- if (huge_pte_none(pte) || pte_present(pte))
- return false;
- swp = pte_to_swp_entry(pte);
- if (is_migration_entry(swp))
- return true;
- else
- return false;
-}
-
-bool is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
- swp_entry_t swp;
-
- if (huge_pte_none(pte) || pte_present(pte))
- return false;
- swp = pte_to_swp_entry(pte);
- if (is_hwpoison_entry(swp))
- return true;
- else
- return false;
-}
-
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
struct folio *new_folio, pte_t old, unsigned long sz)
@@ -5606,6 +5580,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
unsigned long npages = pages_per_huge_page(h);
struct mmu_notifier_range range;
unsigned long last_addr_mask;
+ softleaf_t softleaf;
int ret = 0;
if (cow) {
@@ -5653,16 +5628,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
again:
if (huge_pte_none(entry)) {
- /*
- * Skip if src entry none.
- */
- ;
- } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+ /* Skip if src entry none. */
+ goto next;
+ }
+
+ softleaf = softleaf_from_pte(entry);
+ if (unlikely(softleaf_is_hwpoison(softleaf))) {
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
- } else if (unlikely(is_hugetlb_entry_migration(entry))) {
- softleaf_t softleaf = softleaf_from_pte(entry);
+ } else if (unlikely(softleaf_is_migration(softleaf))) {
bool uffd_wp = pte_swp_uffd_wp(entry);
if (!is_readable_migration_entry(softleaf) && cow) {
@@ -5681,7 +5656,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
entry = huge_pte_clear_uffd_wp(entry);
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(pte_is_marker(entry))) {
- const softleaf_t softleaf = softleaf_from_pte(entry);
const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
if (marker)
@@ -5739,9 +5713,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
}
hugetlb_install_folio(dst_vma, dst_pte, addr,
new_folio, src_pte_old, sz);
- spin_unlock(src_ptl);
- spin_unlock(dst_ptl);
- continue;
+ goto next;
}
if (cow) {
@@ -5762,6 +5734,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
set_huge_pte_at(dst, addr, dst_pte, entry, sz);
hugetlb_count_add(npages, dst);
}
+
+next:
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
}
@@ -6770,8 +6744,10 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = 0;
/* Not present, either a migration or a hwpoisoned entry */
- if (!pte_present(vmf.orig_pte)) {
- if (is_hugetlb_entry_migration(vmf.orig_pte)) {
+ if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
+ const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
+
+ if (softleaf_is_migration(softleaf)) {
/*
* Release the hugetlb fault lock now, but retain
* the vma lock, because it is needed to guard the
@@ -6782,9 +6758,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
migration_entry_wait_huge(vma, vmf.address, vmf.pte);
return 0;
- } else if (is_hugetlb_entry_hwpoisoned(vmf.orig_pte))
+ }
+ if (softleaf_is_hwpoison(softleaf)) {
ret = VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
+ }
+
goto out_mutex;
}
@@ -7166,7 +7145,9 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
i_mmap_lock_write(vma->vm_file->f_mapping);
last_addr_mask = hugetlb_mask_last_page(h);
for (; address < end; address += psize) {
+ softleaf_t entry;
spinlock_t *ptl;
+
ptep = hugetlb_walk(vma, address, psize);
if (!ptep) {
if (!uffd_wp) {
@@ -7198,15 +7179,23 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
continue;
}
pte = huge_ptep_get(mm, address, ptep);
- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
- /* Nothing to do. */
- } else if (unlikely(is_hugetlb_entry_migration(pte))) {
- softleaf_t entry = softleaf_from_pte(pte);
+ if (huge_pte_none(pte)) {
+ if (unlikely(uffd_wp))
+ /* Safe to modify directly (none->non-present). */
+ set_huge_pte_at(mm, address, ptep,
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ psize);
+ goto next;
+ }
+ entry = softleaf_from_pte(pte);
+ if (unlikely(softleaf_is_hwpoison(entry))) {
+ /* Nothing to do. */
+ } else if (unlikely(softleaf_is_migration(entry))) {
struct folio *folio = softleaf_to_folio(entry);
pte_t newpte = pte;
- if (is_writable_migration_entry(entry)) {
+ if (softleaf_is_migration_write(entry)) {
if (folio_test_anon(folio))
entry = make_readable_exclusive_migration_entry(
swp_offset(entry));
@@ -7233,7 +7222,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
/* Safe to modify directly (non-present->none). */
huge_pte_clear(mm, address, ptep, psize);
- } else if (!huge_pte_none(pte)) {
+ } else {
pte_t old_pte;
unsigned int shift = huge_page_shift(hstate_vma(vma));
@@ -7246,16 +7235,10 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
pte = huge_pte_clear_uffd_wp(pte);
huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
pages++;
- } else {
- /* None pte */
- if (unlikely(uffd_wp))
- /* Safe to modify directly (none->non-present). */
- set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP),
- psize);
}
- spin_unlock(ptl);
+next:
+ spin_unlock(ptl);
cond_resched();
}
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 01c3b98f87a6..dee95d5ecfd4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -768,16 +768,21 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
unsigned long flags = qp->flags;
struct folio *folio;
spinlock_t *ptl;
- pte_t entry;
+ pte_t ptep;
ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
- entry = huge_ptep_get(walk->mm, addr, pte);
- if (!pte_present(entry)) {
- if (unlikely(is_hugetlb_entry_migration(entry)))
- qp->nr_failed++;
+ ptep = huge_ptep_get(walk->mm, addr, pte);
+ if (!pte_present(ptep)) {
+ if (!huge_pte_none(ptep)) {
+ const softleaf_t entry = softleaf_from_pte(ptep);
+
+ if (unlikely(softleaf_is_migration(entry)))
+ qp->nr_failed++;
+ }
+
goto unlock;
}
- folio = pfn_folio(pte_pfn(entry));
+ folio = pfn_folio(pte_pfn(ptep));
if (!queue_folio_required(folio, qp))
goto unlock;
if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
diff --git a/mm/migrate.c b/mm/migrate.c
index 3b6bd374157d..48f98a6c1ad2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -515,16 +515,18 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
+ softleaf_t entry;
pte_t pte;
hugetlb_vma_assert_locked(vma);
spin_lock(ptl);
pte = huge_ptep_get(vma->vm_mm, addr, ptep);
- if (unlikely(!is_hugetlb_entry_migration(pte))) {
- spin_unlock(ptl);
- hugetlb_vma_unlock_read(vma);
- } else {
+ if (huge_pte_none(pte))
+ goto fail;
+
+ entry = softleaf_from_pte(pte);
+ if (softleaf_is_migration(entry)) {
/*
* If migration entry existed, safe to release vma lock
* here because the pgtable page won't be freed without the
@@ -533,7 +535,12 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
*/
hugetlb_vma_unlock_read(vma);
migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+ return;
}
+
+fail:
+ spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
}
#endif
--
2.51.0
Hi Andrew,
Please apply this fix.
Thanks, Lorenzo
----8<----
From fab663ef70a57f71aef762538a9b31deca811791 Mon Sep 17 00:00:00 2001
From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Date: Thu, 27 Nov 2025 17:41:49 +0000
Subject: [PATCH] fixup
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
fs/proc/task_mmu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index d00ac179d973..81dfc26bfae8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2500,9 +2500,11 @@ static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
const unsigned long psize = huge_page_size(hstate_vma(vma));
softleaf_t entry;
- if (huge_pte_none(ptent))
+ if (huge_pte_none(ptent)) {
set_huge_pte_at(vma->vm_mm, addr, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP), psize);
+ return;
+ }
entry = softleaf_from_pte(ptent);
if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
--
2.51.2
On Thu, 27 Nov 2025 17:45:17 +0000 Lorenzo Stoakes <lorenzo.stoakes@oracle.com> wrote:
> Hi Andrew,
>
> Please apply this fix.
>
The offending patch is in mm-stable now, so I'll do this as a
hey-git-made-me-add-a-bisection-hole commit.
From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Subject: fs/proc/task_mmu.c: fix make_uffd_wp_huge_pte() huge pte handling
Date: Thu, 27 Nov 2025 17:45:17 +0000
make_uffd_wp_huge_pte() should return after handling a huge_pte_none()
pte.
Link: https://lkml.kernel.org/r/66178124-ebdf-4e23-b8ca-ed3eb8030c81@lucifer.local
Fixes: 03bfbc3ad6e4 ("mm: remove is_hugetlb_entry_[migration, hwpoisoned]()")
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reported-by: Vlastimil Babka <vbabka@suse.cz>
Closes: https://lkml.kernel.org/r/dc483db3-be4d-45f7-8b40-a28f5d8f5738@suse.cz
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
fs/proc/task_mmu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/fs/proc/task_mmu.c~fs-proc-task_mmuc-fix-make_uffd_wp_huge_pte-huge-pte-handling
+++ a/fs/proc/task_mmu.c
@@ -2500,9 +2500,11 @@ static void make_uffd_wp_huge_pte(struct
const unsigned long psize = huge_page_size(hstate_vma(vma));
softleaf_t entry;
- if (huge_pte_none(ptent))
+ if (huge_pte_none(ptent)) {
set_huge_pte_at(vma->vm_mm, addr, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP), psize);
+ return;
+ }
entry = softleaf_from_pte(ptent);
if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
_
On 11/10/25 23:21, Lorenzo Stoakes wrote:
> We do not need to have explicit helper functions for these, it adds a level
> of confusion and indirection when we can simply use software leaf entry
> logic here instead and spell out the special huge_pte_none() case we must
> consider.
>
> No functional change intended.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
But seems to me a fixup is needed:
> ---
> fs/proc/task_mmu.c | 19 +++++----
> include/linux/hugetlb.h | 2 -
> mm/hugetlb.c | 91 +++++++++++++++++------------------------
> mm/mempolicy.c | 17 +++++---
> mm/migrate.c | 15 +++++--
> 5 files changed, 69 insertions(+), 75 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 6cb9e1691e18..3cdefa7546db 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -2499,22 +2499,23 @@ static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
> unsigned long addr, pte_t *ptep,
> pte_t ptent)
> {
> - unsigned long psize;
> + const unsigned long psize = huge_page_size(hstate_vma(vma));
> + softleaf_t entry;
>
> - if (is_hugetlb_entry_hwpoisoned(ptent) || pte_is_marker(ptent))
> - return;
> + if (huge_pte_none(ptent))
> + set_huge_pte_at(vma->vm_mm, addr, ptep,
> + make_pte_marker(PTE_MARKER_UFFD_WP), psize);
Shouldn't we return here? Otherwise AFAICS we'll also reach the
huge_ptep_modify_prot_commit() below and that wasn't happening before.
>
> - psize = huge_page_size(hstate_vma(vma));
> + entry = softleaf_from_pte(ptent);
> + if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
> + return;
>
> - if (is_hugetlb_entry_migration(ptent))
> + if (softleaf_is_migration(entry))
> set_huge_pte_at(vma->vm_mm, addr, ptep,
> pte_swp_mkuffd_wp(ptent), psize);
> - else if (!huge_pte_none(ptent))
> + else
> huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
> huge_pte_mkuffd_wp(ptent));
> - else
> - set_huge_pte_at(vma->vm_mm, addr, ptep,
> - make_pte_marker(PTE_MARKER_UFFD_WP), psize);
> }
> #endif /* CONFIG_HUGETLB_PAGE */
>
On Thu, Nov 27, 2025 at 06:29:39PM +0100, Vlastimil Babka wrote:
> On 11/10/25 23:21, Lorenzo Stoakes wrote:
> > We do not need to have explicit helper functions for these, it adds a level
> > of confusion and indirection when we can simply use software leaf entry
> > logic here instead and spell out the special huge_pte_none() case we must
> > consider.
> >
> > No functional change intended.
> >
> > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> But seems to me a fixup is needed:
>
> > ---
> > fs/proc/task_mmu.c | 19 +++++----
> > include/linux/hugetlb.h | 2 -
> > mm/hugetlb.c | 91 +++++++++++++++++------------------------
> > mm/mempolicy.c | 17 +++++---
> > mm/migrate.c | 15 +++++--
> > 5 files changed, 69 insertions(+), 75 deletions(-)
> >
> > diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> > index 6cb9e1691e18..3cdefa7546db 100644
> > --- a/fs/proc/task_mmu.c
> > +++ b/fs/proc/task_mmu.c
> > @@ -2499,22 +2499,23 @@ static void make_uffd_wp_huge_pte(struct vm_area_struct *vma,
> > unsigned long addr, pte_t *ptep,
> > pte_t ptent)
> > {
> > - unsigned long psize;
> > + const unsigned long psize = huge_page_size(hstate_vma(vma));
> > + softleaf_t entry;
> >
> > - if (is_hugetlb_entry_hwpoisoned(ptent) || pte_is_marker(ptent))
> > - return;
> > + if (huge_pte_none(ptent))
> > + set_huge_pte_at(vma->vm_mm, addr, ptep,
> > + make_pte_marker(PTE_MARKER_UFFD_WP), psize);
>
> Shouldn't we return here? Otherwise AFAICS we'll also reach the
> huge_ptep_modify_prot_commit() below and that wasn't happening before.
Yup, will reply with fix-patch, thanks.
>
> >
> > - psize = huge_page_size(hstate_vma(vma));
> > + entry = softleaf_from_pte(ptent);
> > + if (softleaf_is_hwpoison(entry) || softleaf_is_marker(entry))
> > + return;
> >
> > - if (is_hugetlb_entry_migration(ptent))
> > + if (softleaf_is_migration(entry))
> > set_huge_pte_at(vma->vm_mm, addr, ptep,
> > pte_swp_mkuffd_wp(ptent), psize);
> > - else if (!huge_pte_none(ptent))
> > + else
> > huge_ptep_modify_prot_commit(vma, addr, ptep, ptent,
> > huge_pte_mkuffd_wp(ptent));
> > - else
> > - set_huge_pte_at(vma->vm_mm, addr, ptep,
> > - make_pte_marker(PTE_MARKER_UFFD_WP), psize);
> > }
> > #endif /* CONFIG_HUGETLB_PAGE */
> >
© 2016 - 2025 Red Hat, Inc.