Function unmerge_ksm_pages() is unnecessary now that break_ksm() walks
an address range, so replace it with break_ksm().

Suggested-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
---
mm/ksm.c | 39 ++++++++++++++++-----------------------
1 file changed, 16 insertions(+), 23 deletions(-)

diff --git a/mm/ksm.c b/mm/ksm.c
index 1d1ef0554c7c..18c9e3bda285 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -669,6 +669,18 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
};

/*
+ * Though it's very tempting to unmerge rmap_items from stable tree rather
+ * than check every pte of a given vma, the locking doesn't quite work for
+ * that - an rmap_item is assigned to the stable tree after inserting ksm
+ * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
+ * rmap_items from parent to child at fork time (so as not to waste time
+ * if exit comes before the next scan reaches it).
+ *
+ * Similarly, although we'd like to remove rmap_items (so updating counts
+ * and freeing memory) when unmerging an area, it's easier to leave that
+ * to the next pass of ksmd - consider, for example, how ksmd might be
+ * in cmp_and_merge_page on one of the rmap_items we would be removing.
+ *
* We use break_ksm to break COW on a ksm page by triggering unsharing,
* such that the ksm page will get replaced by an exclusive anonymous page.
*
@@ -1077,25 +1089,6 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
}
}

-/*
- * Though it's very tempting to unmerge rmap_items from stable tree rather
- * than check every pte of a given vma, the locking doesn't quite work for
- * that - an rmap_item is assigned to the stable tree after inserting ksm
- * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
- * rmap_items from parent to child at fork time (so as not to waste time
- * if exit comes before the next scan reaches it).
- *
- * Similarly, although we'd like to remove rmap_items (so updating counts
- * and freeing memory) when unmerging an area, it's easier to leave that
- * to the next pass of ksmd - consider, for example, how ksmd might be
- * in cmp_and_merge_page on one of the rmap_items we would be removing.
- */
-static int unmerge_ksm_pages(struct vm_area_struct *vma,
- unsigned long start, unsigned long end, bool lock_vma)
-{
- return break_ksm(vma, start, end, lock_vma);
-}
-
static inline
struct ksm_stable_node *folio_stable_node(const struct folio *folio)
{
@@ -1233,7 +1226,7 @@ static int unmerge_and_remove_all_rmap_items(void)
for_each_vma(vmi, vma) {
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
continue;
- err = unmerge_ksm_pages(vma,
+ err = break_ksm(vma,
vma->vm_start, vma->vm_end, false);
if (err)
goto error;
@@ -2861,7 +2854,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma)
return 0;

if (vma->anon_vma) {
- err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
+ err = break_ksm(vma, vma->vm_start, vma->vm_end, true);
if (err)
return err;
}
@@ -3013,7 +3006,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0; /* just ignore the advice */

if (vma->anon_vma) {
- err = unmerge_ksm_pages(vma, start, end, true);
+ err = break_ksm(vma, start, end, true);
if (err)
return err;
}
@@ -3395,7 +3388,7 @@ static int ksm_memory_callback(struct notifier_block *self,
* Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
* and remove_all_stable_nodes() while memory is going offline:
* it is unsafe for them to touch the stable tree at this time.
- * But unmerge_ksm_pages(), rmap lookups and other entry points
+ * But break_ksm(), rmap lookups and other entry points
* which do not need the ksm_thread_mutex are all safe.
*/
mutex_lock(&ksm_thread_mutex);
--
2.43.0

On 28.10.25 14:19, Pedro Demarchi Gomes wrote:
> Function unmerge_ksm_pages() is unnecessary now that break_ksm() walks
> an address range, so replace it with break_ksm().
>
> Suggested-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
> ---
> mm/ksm.c | 39 ++++++++++++++++-----------------------
> 1 file changed, 16 insertions(+), 23 deletions(-)
>
> diff --git a/mm/ksm.c b/mm/ksm.c
> index 1d1ef0554c7c..18c9e3bda285 100644
> --- a/mm/ksm.c
> +++ b/mm/ksm.c
> @@ -669,6 +669,18 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
> };
>
> /*
> + * Though it's very tempting to unmerge rmap_items from stable tree rather
> + * than check every pte of a given vma, the locking doesn't quite work for
> + * that - an rmap_item is assigned to the stable tree after inserting ksm
> + * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
> + * rmap_items from parent to child at fork time (so as not to waste time
> + * if exit comes before the next scan reaches it).
> + *
> + * Similarly, although we'd like to remove rmap_items (so updating counts
> + * and freeing memory) when unmerging an area, it's easier to leave that
> + * to the next pass of ksmd - consider, for example, how ksmd might be
> + * in cmp_and_merge_page on one of the rmap_items we would be removing.
> + *
> * We use break_ksm to break COW on a ksm page by triggering unsharing,
> * such that the ksm page will get replaced by an exclusive anonymous page.
> *
> @@ -1077,25 +1089,6 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
> }
> }
>
> -/*
> - * Though it's very tempting to unmerge rmap_items from stable tree rather
> - * than check every pte of a given vma, the locking doesn't quite work for
> - * that - an rmap_item is assigned to the stable tree after inserting ksm
> - * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
> - * rmap_items from parent to child at fork time (so as not to waste time
> - * if exit comes before the next scan reaches it).
> - *
> - * Similarly, although we'd like to remove rmap_items (so updating counts
> - * and freeing memory) when unmerging an area, it's easier to leave that
> - * to the next pass of ksmd - consider, for example, how ksmd might be
> - * in cmp_and_merge_page on one of the rmap_items we would be removing.
> - */
> -static int unmerge_ksm_pages(struct vm_area_struct *vma,
> - unsigned long start, unsigned long end, bool lock_vma)
> -{
> - return break_ksm(vma, start, end, lock_vma);
> -}
> -
> static inline
> struct ksm_stable_node *folio_stable_node(const struct folio *folio)
> {
> @@ -1233,7 +1226,7 @@ static int unmerge_and_remove_all_rmap_items(void)
> for_each_vma(vmi, vma) {
> if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
> continue;
> - err = unmerge_ksm_pages(vma,
> + err = break_ksm(vma,
> vma->vm_start, vma->vm_end, false);
Move that all into a single line.
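I.e. (untested, just to illustrate the intended shape), something like:

	err = break_ksm(vma, vma->vm_start, vma->vm_end, false);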
With that

Acked-by: David Hildenbrand <david@redhat.com>

Thanks for tackling this!
--
Cheers
David / dhildenb

On Wed, Oct 29, 2025 at 03:46:36PM +0100, David Hildenbrand wrote:
> On 28.10.25 14:19, Pedro Demarchi Gomes wrote:
> > Function unmerge_ksm_pages() is unnecessary now that break_ksm() walks
> > an address range, so replace it with break_ksm().
> >
> > Suggested-by: David Hildenbrand <david@redhat.com>
> > Signed-off-by: Pedro Demarchi Gomes <pedrodemargomes@gmail.com>
> > ---
> > mm/ksm.c | 39 ++++++++++++++++-----------------------
> > 1 file changed, 16 insertions(+), 23 deletions(-)
> >
> > diff --git a/mm/ksm.c b/mm/ksm.c
> > index 1d1ef0554c7c..18c9e3bda285 100644
> > --- a/mm/ksm.c
> > +++ b/mm/ksm.c
> > @@ -669,6 +669,18 @@ static const struct mm_walk_ops break_ksm_lock_vma_ops = {
> > };
> >
> > /*
> > + * Though it's very tempting to unmerge rmap_items from stable tree rather
> > + * than check every pte of a given vma, the locking doesn't quite work for
> > + * that - an rmap_item is assigned to the stable tree after inserting ksm
> > + * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
> > + * rmap_items from parent to child at fork time (so as not to waste time
> > + * if exit comes before the next scan reaches it).
> > + *
> > + * Similarly, although we'd like to remove rmap_items (so updating counts
> > + * and freeing memory) when unmerging an area, it's easier to leave that
> > + * to the next pass of ksmd - consider, for example, how ksmd might be
> > + * in cmp_and_merge_page on one of the rmap_items we would be removing.
> > + *
> > * We use break_ksm to break COW on a ksm page by triggering unsharing,
> > * such that the ksm page will get replaced by an exclusive anonymous page.
> > *
> > @@ -1077,25 +1089,6 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
> > }
> > }
> >
> > -/*
> > - * Though it's very tempting to unmerge rmap_items from stable tree rather
> > - * than check every pte of a given vma, the locking doesn't quite work for
> > - * that - an rmap_item is assigned to the stable tree after inserting ksm
> > - * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
> > - * rmap_items from parent to child at fork time (so as not to waste time
> > - * if exit comes before the next scan reaches it).
> > - *
> > - * Similarly, although we'd like to remove rmap_items (so updating counts
> > - * and freeing memory) when unmerging an area, it's easier to leave that
> > - * to the next pass of ksmd - consider, for example, how ksmd might be
> > - * in cmp_and_merge_page on one of the rmap_items we would be removing.
> > - */
> > -static int unmerge_ksm_pages(struct vm_area_struct *vma,
> > - unsigned long start, unsigned long end, bool lock_vma)
> > -{
> > - return break_ksm(vma, start, end, lock_vma);
> > -}
> > -
> > static inline
> > struct ksm_stable_node *folio_stable_node(const struct folio *folio)
> > {
> > @@ -1233,7 +1226,7 @@ static int unmerge_and_remove_all_rmap_items(void)
> > for_each_vma(vmi, vma) {
> > if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
> > continue;
> > - err = unmerge_ksm_pages(vma,
> > + err = break_ksm(vma,
> > vma->vm_start, vma->vm_end, false);
>
> Move that all into a single line.
>
Ok.
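For v2 that hunk will then look roughly like this (sketch only, the actual
diff may differ slightly):

-	err = unmerge_ksm_pages(vma,
-			vma->vm_start, vma->vm_end, false);
+	err = break_ksm(vma, vma->vm_start, vma->vm_end, false);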
>
> With that
>
> Acked-by: David Hildenbrand <david@redhat.com>
>
> Thanks for tackling this!
>
Thanks for your comments!
I will send a v2 soon with the corrections.
> --
> Cheers
>
> David / dhildenb
>
>