[PATCH v1 6/9] mm: Change dup_mmap() recovery
Posted by Liam R. Howlett 3 weeks, 2 days ago
When dup_mmap() fails during vma duplication or setup, don't write an
XA_ZERO entry in the vma tree.  Instead, destroy the tree and free the
new resources, leaving an empty vma tree.

Using XA_ZERO introduced races in which a vma could be found between
dup_mmap() dropping all locks and exit_mmap() taking them.  The race
can occur because the mm can be reached through the other trees via
successfully copied vmas, as well as by other means such as the swapoff
code.

The XA_ZERO entry marked the point at which vma removal and page table
freeing should stop.  The newly added range arguments to unmap_vmas()
and free_pgtables() now serve that purpose.

Replacing the XA_ZERO entry with the new argument list also means the
xa_is_zero() checks are no longer necessary, so they are removed as
well.
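
As an illustration only, here is a minimal userspace sketch of the two
stopping strategies; struct vma here, ZERO_ENTRY, and the teardown
helpers are simplified stand-ins for the kernel machinery, not the real
interfaces:

  #include <stddef.h>
  #include <stdio.h>

  struct vma { unsigned long vm_start, vm_end; };

  /* Models XA_ZERO_ENTRY: a non-NULL marker stored in the tree. */
  #define ZERO_ENTRY ((struct vma *)1)

  /* Old scheme: walk until NULL or the in-tree sentinel is found. */
  static void teardown_with_sentinel(struct vma **tree, size_t n)
  {
          for (size_t i = 0; i < n; i++) {
                  if (!tree[i] || tree[i] == ZERO_ENTRY) /* xa_is_zero() */
                          break;
                  printf("unmap %#lx-%#lx\n", tree[i]->vm_start,
                         tree[i]->vm_end);
          }
  }

  /* New scheme: the caller passes the bound; no marker in the tree. */
  static void teardown_with_bound(struct vma **tree, size_t n,
                                  unsigned long max)
  {
          for (size_t i = 0; i < n; i++) {
                  if (!tree[i] || tree[i]->vm_start >= max)
                          break;
                  printf("unmap %#lx-%#lx\n", tree[i]->vm_start,
                         tree[i]->vm_end);
          }
  }

  int main(void)
  {
          struct vma a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 };
          struct vma *marked[] = { &a, ZERO_ENTRY, &b }; /* marker in tree */
          struct vma *clean[]  = { &a, &b };             /* clean tree */

          teardown_with_sentinel(marked, 3);
          teardown_with_bound(clean, 2, /* max = */ 0x3000);
          return 0;
  }

Both walks stop at the same place, but the second leaves no marker in
the tree for a racing lookup to find.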

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/memory.c |  6 +-----
 mm/mmap.c   | 42 +++++++++++++++++++++++++++++++-----------
 2 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 24716b3713f66..829cd94950182 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -408,8 +408,6 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		 * be 0.  This will underflow and is okay.
 		 */
 		next = mas_find(mas, tree_max - 1);
-		if (unlikely(xa_is_zero(next)))
-			next = NULL;
 
 		/*
 		 * Hide vma from rmap and truncate_pagecache before freeing
@@ -428,8 +426,6 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
 			vma = next;
 			next = mas_find(mas, tree_max - 1);
-			if (unlikely(xa_is_zero(next)))
-				next = NULL;
 			if (mm_wr_locked)
 				vma_start_write(vma);
 			unlink_anon_vmas(vma);
@@ -2129,7 +2125,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 				 mm_wr_locked);
 		hugetlb_zap_end(vma, &details);
 		vma = mas_find(mas, tree_end - 1);
-	} while (vma && likely(!xa_is_zero(vma)));
+	} while (vma);
 	mmu_notifier_invalidate_range_end(&range);
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 0f4808f135fe6..aa4770b8d7f1e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1288,7 +1288,7 @@ void exit_mmap(struct mm_struct *mm)
 	arch_exit_mmap(mm);
 
 	vma = vma_next(&vmi);
-	if (!vma || unlikely(xa_is_zero(vma))) {
+	if (!vma) {
 		/* Can happen if dup_mmap() received an OOM */
 		mmap_read_unlock(mm);
 		mmap_write_lock(mm);
@@ -1858,20 +1858,40 @@ __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		ksm_fork(mm, oldmm);
 		khugepaged_fork(mm, oldmm);
 	} else {
+		unsigned long max;
 
 		/*
-		 * The entire maple tree has already been duplicated. If the
-		 * mmap duplication fails, mark the failure point with
-		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
-		 * stop releasing VMAs that have not been duplicated after this
-		 * point.
+		 * The entire maple tree has already been duplicated, but
+		 * replacing the vmas failed at mpnt (which could be NULL if
+		 * all were allocated but the last vma was not fully set up).
+		 * Use the start address of the failure point to clean up the
+		 * partially initialized tree.
 		 */
-		if (mpnt) {
-			mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
-			mas_store(&vmi.mas, XA_ZERO_ENTRY);
-			/* Avoid OOM iterating a broken tree */
-			mm_flags_set(MMF_OOM_SKIP, mm);
+		if (!mm->map_count) {
+		/* Zero vmas were written to the new tree */
+			max = 0;
+		} else if (mpnt) {
+			/* partial tree failure */
+			max = mpnt->vm_start;
+		} else {
+			/* All vmas were written to the new tree */
+			max = ULONG_MAX;
 		}
+
+		/* Hide mm from oom killer because the memory is being freed */
+		mm_flags_set(MMF_OOM_SKIP, mm);
+		if (max) {
+			vma_iter_set(&vmi, 0);
+			tmp = vma_next(&vmi);
+			flush_cache_mm(mm);
+			unmap_region(&vmi.mas, /* vma = */ tmp,
+				     /* vma_min = */ 0, /* vma_max = */ max,
+				     /* pg_max = */ max, /* prev = */ NULL,
+				     /* next = */ NULL);
+			charge = tear_down_vmas(mm, &vmi, tmp, max);
+			vm_unacct_memory(charge);
+		}
+		__mt_destroy(&mm->mm_mt);
 		/*
 		 * The mm_struct is going to exit, but the locks will be dropped
 		 * first.  Set the mm_struct as unstable is advisable as it is
-- 
2.47.2
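
The recovery path above reduces to choosing an upper bound for the
teardown and removing everything below it.  A compressed userspace
sketch of that bound selection follows; map_count and mpnt mirror the
kernel names, but vma_stub and failure_bound are hypothetical
stand-ins, not the real structures:

  #include <limits.h>
  #include <stdio.h>

  struct vma_stub { unsigned long vm_start; };

  /* Mirrors the three cases in dup_mmap()'s failure path. */
  static unsigned long failure_bound(int map_count,
                                     const struct vma_stub *mpnt)
  {
          if (!map_count)
                  return 0;              /* no vmas reached the new tree */
          if (mpnt)
                  return mpnt->vm_start; /* stop at the vma that failed */
          return ULONG_MAX;              /* all copied; tear it all down */
  }

  int main(void)
  {
          struct vma_stub failed = { 0x7f0000000000UL };

          printf("%#lx\n", failure_bound(0, NULL));    /* empty tree */
          printf("%#lx\n", failure_bound(3, &failed)); /* partial failure */
          printf("%#lx\n", failure_bound(3, NULL));    /* all vmas copied */
          return 0;
  }

A bound of zero means no vmas made it into the new tree, so
unmap_region() and tear_down_vmas() are skipped and __mt_destroy()
only frees the tree itself.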
Re: [PATCH v1 6/9] mm: Change dup_mmap() recovery
Posted by David Hildenbrand 3 weeks ago
On 09.09.25 21:09, Liam R. Howlett wrote:
> [...]
> +		/* Hide mm from oom killer because the memory is being freed */
> +		mm_flags_set(MMF_OOM_SKIP, mm);
> +		if (max) {
> +			vma_iter_set(&vmi, 0);
> +			tmp = vma_next(&vmi);
> +			flush_cache_mm(mm);
> +			unmap_region(&vmi.mas, /* vma = */ tmp,
> +				     /* vma_min = */ 0, /* vma_max = */ max,
> +				     /* pg_max = */ max, /* prev = */ NULL,
> +				     /* next = */ NULL);
> +			charge = tear_down_vmas(mm, &vmi, tmp, max);
> +			vm_unacct_memory(charge);
> +		}
> +		__mt_destroy(&mm->mm_mt);

Usual comment about just calling these start/end, maybe with a prefix
if required.

Apart from that, LGTM.

-- 
Cheers

David / dhildenb
Re: [PATCH v1 6/9] mm: Change dup_mmap() recovery
Posted by Lorenzo Stoakes 3 weeks ago
On Tue, Sep 09, 2025 at 03:09:42PM -0400, Liam R. Howlett wrote:
> [...]
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>

LGTM, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> [...]
>  		/*
> -		 * The entire maple tree has already been duplicated. If the
> -		 * mmap duplication fails, mark the failure point with
> -		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
> -		 * stop releasing VMAs that have not been duplicated after this
> -		 * point.
> +		 * The entire maple tree has already been duplicated, but
> +		 * replacing the vmas failed at mpnt (which could be NULL if
> +		 * all were allocated but the last vma was not fully set up).
> +		 * Use the start address of the failure point to clean up the
> +		 * partially initialized tree.
>  		 */

Thanks for this! Great comment.

> [...]
> +		if (max) {
> +			vma_iter_set(&vmi, 0);
> +			tmp = vma_next(&vmi);
> +			flush_cache_mm(mm);
> +			unmap_region(&vmi.mas, /* vma = */ tmp,
> +				     /* vma_min = */ 0, /* vma_max = */ max,
> +				     /* pg_max = */ max, /* prev = */ NULL,
> +				     /* next = */ NULL);

Thanks! This really helps.

Re: [PATCH v1 6/9] mm: Change dup_mmap() recovery
Posted by Pedro Falcato 3 weeks, 1 day ago
On Tue, Sep 09, 2025 at 03:09:42PM -0400, Liam R. Howlett wrote:
> [...]
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>

Reviewed-by: Pedro Falcato <pfalcato@suse.de>

-- 
Pedro
Re: [PATCH v1 6/9] mm: Change dup_mmap() recovery
Posted by Suren Baghdasaryan 3 weeks, 2 days ago
On Tue, Sep 9, 2025 at 12:11 PM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> [...]
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>

Reviewed-by: Suren Baghdasaryan <surenb@google.com>

> [...]
> +               if (!mm->map_count) {
> +                       /* Zero vmas were written to the new tree */
> +                       max = 0;
> +               } else if (mpnt) {
> +                       /* partial tree failure */
> +                       max = mpnt->vm_start;
> +               } else {
> +                       /* All vmas were written to the new tree */

So, the cleanup for this case used to be handled by exit_mmap(). I
think it's ok to do it here but the changelog should mention this
change as well IMHO.

> +                       max = ULONG_MAX;
>                 }