This reverts commit e317a8d8b4f600fc7ec9725e26417030ee594f52 and changes
PageKsm(page) to folio_test_ksm(page_folio(page)).

This reverts break_ksm() to use walk_page_range_vma() instead of
folio_walk_start().
This will make it easier to later modify break_ksm() to perform a proper
range walk.

Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
---
 mm/ksm.c | 63 ++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 47 insertions(+), 16 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 4f672f4f2140..2a9a7fd4c777 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -607,6 +607,47 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
return atomic_read(&mm->mm_users) == 0;
}

+static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ struct page *page = NULL;
+ spinlock_t *ptl;
+ pte_t *pte;
+ pte_t ptent;
+ int ret;
+
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte)
+ return 0;
+ ptent = ptep_get(pte);
+ if (pte_present(ptent)) {
+ page = vm_normal_page(walk->vma, addr, ptent);
+ } else if (!pte_none(ptent)) {
+ swp_entry_t entry = pte_to_swp_entry(ptent);
+
+ /*
+ * As KSM pages remain KSM pages until freed, no need to wait
+ * here for migration to end.
+ */
+ if (is_migration_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
+ }
+ /* return 1 if the page is a normal KSM page or KSM-placed zero page */
+ ret = (page && folio_test_ksm(page_folio(page))) || is_ksm_zero_pte(ptent);
+ pte_unmap_unlock(pte, ptl);
+ return ret;
+}
+
+static const struct mm_walk_ops break_ksm_ops = {
+ .pmd_entry = break_ksm_pmd_entry,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static const struct mm_walk_ops break_ksm_lock_vma_ops = {
+ .pmd_entry = break_ksm_pmd_entry,
+ .walk_lock = PGWALK_WRLOCK,
+};
+
/*
* We use break_ksm to break COW on a ksm page by triggering unsharing,
* such that the ksm page will get replaced by an exclusive anonymous page.
@@ -623,26 +664,16 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
vm_fault_t ret = 0;
-
- if (lock_vma)
- vma_start_write(vma);
+ const struct mm_walk_ops *ops = lock_vma ?
+ &break_ksm_lock_vma_ops : &break_ksm_ops;
do {
- bool ksm_page = false;
- struct folio_walk fw;
- struct folio *folio;
+ int ksm_page;
cond_resched();
- folio = folio_walk_start(&fw, vma, addr,
- FW_MIGRATION | FW_ZEROPAGE);
- if (folio) {
- /* Small folio implies FW_LEVEL_PTE. */
- if (!folio_test_large(folio) &&
- (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
- ksm_page = true;
- folio_walk_end(&fw, vma);
- }
-
+ ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
+ if (WARN_ON_ONCE(ksm_page < 0))
+ return ksm_page;
if (!ksm_page)
return 0;
ret = handle_mm_fault(vma, addr,
--
2.43.0
On 28.10.25 14:19, Pedro Demarchi Gomes wrote:
> This reverts commit e317a8d8b4f600fc7ec9725e26417030ee594f52 and changes
> PageKsm(page) to folio_test_ksm(page_folio(page)).
>
> This reverts break_ksm() to use walk_page_range_vma() instead of
> folio_walk_start().
> This will make it easier to later modify break_ksm() to perform a proper
> range walk.
>
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
> ---
> mm/ksm.c | 63 ++++++++++++++++++++++++++++++++++++++++++--------------
> 1 file changed, 47 insertions(+), 16 deletions(-)
>
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 4f672f4f2140..2a9a7fd4c777 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -607,6 +607,47 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
> return atomic_read(&mm->mm_users) == 0;
> }
>
> +static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
> + struct mm_walk *walk)
> +{
> + struct page *page = NULL;
> + spinlock_t *ptl;
> + pte_t *pte;
> + pte_t ptent;
> + int ret;
> +
> + pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + if (!pte)
> + return 0;
> + ptent = ptep_get(pte);
> + if (pte_present(ptent)) {
> + page = vm_normal_page(walk->vma, addr, ptent);

folio = vm_normal_folio()

> + } else if (!pte_none(ptent)) {
> + swp_entry_t entry = pte_to_swp_entry(ptent);
> +
> + /*
> + * As KSM pages remain KSM pages until freed, no need to wait
> + * here for migration to end.
> + */
> + if (is_migration_entry(entry))
> + page = pfn_swap_entry_to_page(entry);

folio = pfn_swap_entry_folio()

> + }
> + /* return 1 if the page is a normal KSM page or KSM-placed zero page */
> + ret = (page && folio_test_ksm(page_folio(page))) || is_ksm_zero_pte(ptent);

Then you can directly work with folios here.
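
Something like the following, maybe (completely untested sketch, just to
illustrate the folio-based variant using vm_normal_folio() and
pfn_swap_entry_folio() as suggested above):

static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
			       struct mm_walk *walk)
{
	struct folio *folio = NULL;
	spinlock_t *ptl;
	pte_t *pte;
	pte_t ptent;
	int ret;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte)
		return 0;
	ptent = ptep_get(pte);
	if (pte_present(ptent)) {
		folio = vm_normal_folio(walk->vma, addr, ptent);
	} else if (!pte_none(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/*
		 * As KSM pages remain KSM pages until freed, no need to wait
		 * here for migration to end.
		 */
		if (is_migration_entry(entry))
			folio = pfn_swap_entry_folio(entry);
	}
	/* return 1 for a normal KSM folio or the KSM-placed zero page */
	ret = (folio && folio_test_ksm(folio)) || is_ksm_zero_pte(ptent);
	pte_unmap_unlock(pte, ptl);
	return ret;
}

As before, a positive return from ->pmd_entry stops the walk, so break_ksm()
gets it back as the walk_page_range_vma() return value.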
--
Cheers
David / dhildenb
On Wed, Oct 29, 2025 at 03:34:23PM +0100, David Hildenbrand wrote:
> On 28.10.25 14:19, Pedro Demarchi Gomes wrote:
> > This reverts commit e317a8d8b4f600fc7ec9725e26417030ee594f52 and changes
> > PageKsm(page) to folio_test_ksm(page_folio(page)).
> >
> > This reverts break_ksm() to use walk_page_range_vma() instead of
> > folio_walk_start().
> > This will make it easier to later modify break_ksm() to perform a proper
> > range walk.
> >
> > Suggested-by: David Hildenbrand <david@redhat.com>
> > Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
> > ---
> > mm/ksm.c | 63 ++++++++++++++++++++++++++++++++++++++++++--------------
> > 1 file changed, 47 insertions(+), 16 deletions(-)
> >
> > diff --git a/mm/ksm.c b/mm/ksm.c
> > index 4f672f4f2140..2a9a7fd4c777 100644
> > --- a/mm/ksm.c
> > +++ b/mm/ksm.c
> > @@ -607,6 +607,47 @@ static inline bool ksm_test_exit(struct mm_struct *mm)
> > return atomic_read(&mm->mm_users) == 0;
> > }
> > +static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next,
> > + struct mm_walk *walk)
> > +{
> > + struct page *page = NULL;
> > + spinlock_t *ptl;
> > + pte_t *pte;
> > + pte_t ptent;
> > + int ret;
> > +
> > + pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> > + if (!pte)
> > + return 0;
> > + ptent = ptep_get(pte);
> > + if (pte_present(ptent)) {
> > + page = vm_normal_page(walk->vma, addr, ptent);
>
> folio = vm_normal_folio()
>
> > + } else if (!pte_none(ptent)) {
> > + swp_entry_t entry = pte_to_swp_entry(ptent);
> > +
> > + /*
> > + * As KSM pages remain KSM pages until freed, no need to wait
> > + * here for migration to end.
> > + */
> > + if (is_migration_entry(entry))
> > + page = pfn_swap_entry_to_page(entry);
>
> folio = pfn_swap_entry_folio()
>
> > + }
> > + /* return 1 if the page is a normal KSM page or KSM-placed zero page */
> > + ret = (page && folio_test_ksm(page_folio(page))) || is_ksm_zero_pte(ptent);
>
>
> Then you can directly work with folios here.
>
Ack, will do.
> --
> Cheers
>
> David / dhildenb
>
>