[PATCH v3 11/11] mm: Use unmap_desc struct for freeing page tables.

Posted by Liam R. Howlett 2 weeks, 2 days ago
Pass through the unmap_desc to free_pgtables() because it has almost
everything necessary and is already on the stack.

Update testing code as necessary.

No functional changes intended.
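
For reference, the unmap_desc fields consumed here, as introduced earlier
in this series (abridged sketch; see the earlier patches for the full
definition):

	struct unmap_desc {
		struct ma_state *mas;		/* vma tree iterator state */
		struct vm_area_struct *first;	/* first vma in the range */
		unsigned long pg_start;		/* lowest page table address (floor) */
		unsigned long pg_end;		/* highest page table address (ceiling) */
		unsigned long vma_end;		/* highest vma address, exclusive */
		unsigned long tree_end;		/* highest vma tree search address */
		unsigned long tree_reset;	/* address to reset the iterator to */
		bool mm_wr_locked;		/* mm is write locked */
		/* ... */
	};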

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/internal.h                    |  5 +----
 mm/memory.c                      | 37 ++++++++++++++------------------
 mm/mmap.c                        |  6 +++---
 mm/vma.c                         |  6 ++----
 tools/testing/vma/vma_internal.h |  7 +++---
 5 files changed, 25 insertions(+), 36 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 25a17eea550b8..1cad630f0dcef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -512,10 +512,7 @@ bool __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-		   struct vm_area_struct *vma, unsigned long pg_start,
-		   unsigned long pg_end, unsigned long vma_end,
-		   bool mm_wr_locked);
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
 
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
diff --git a/mm/memory.c b/mm/memory.c
index 6fd6decc139e9..16b25eff19251 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -373,12 +373,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 /**
  * free_pgtables() - Free a range of page tables
  * @tlb: The mmu gather
- * @mas: The maple state
- * @vma: The first vma
- * @pg_start: The lowest page table address (floor)
- * @pg_end: The highest page table address (ceiling)
- * @vma_end: The highest vma tree search address
- * @mm_wr_locked: boolean indicating if the mm is write locked
+ * @unmap: The unmap_desc
  *
  * Note: pg_start and pg_end are provided to indicate the absolute range of the
  * page tables that should be removed.  This can differ from the vma mappings on
@@ -388,21 +383,21 @@ void free_pgd_range(struct mmu_gather *tlb,
  * The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
  * unrelated data to the mm_struct being torn down.
  */
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-		   struct vm_area_struct *vma, unsigned long pg_start,
-		   unsigned long pg_end, unsigned long vma_end,
-		   bool mm_wr_locked)
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
 	struct unlink_vma_file_batch vb;
+	struct ma_state *mas = unmap->mas;
+	struct vm_area_struct *vma = unmap->first;
 
 	/*
 	 * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
-	 * may be 0.  Underflow is expected in this case.  Otherwise the
-	 * pagetable end is exclusive.
-	 * vma_end is exclusive.
-	 * The last vma address should never be larger than the pagetable end.
+	 * may be 0.  The underflow here is fine and expected.
+	 * The vma_end is exclusive, which is fine until we use the mas_ instead
+	 * of the vma iterators.
+	 * For freeing the page tables to make sense, the vma_end must be larger
+	 * than the pg_end, so check that after the potential underflow.
 	 */
-	WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
+	WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
 
 	tlb_free_vmas(tlb);
 
@@ -410,13 +405,13 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		unsigned long addr = vma->vm_start;
 		struct vm_area_struct *next;
 
-		next = mas_find(mas, vma_end - 1);
+		next = mas_find(mas, unmap->tree_end - 1);
 
 		/*
 		 * Hide vma from rmap and truncate_pagecache before freeing
 		 * pgtables
 		 */
-		if (mm_wr_locked)
+		if (unmap->mm_wr_locked)
 			vma_start_write(vma);
 		unlink_anon_vmas(vma);
 
@@ -428,16 +423,16 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		 */
 		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
 			vma = next;
-			next = mas_find(mas, vma_end - 1);
-			if (mm_wr_locked)
+			next = mas_find(mas, unmap->tree_end - 1);
+			if (unmap->mm_wr_locked)
 				vma_start_write(vma);
 			unlink_anon_vmas(vma);
 			unlink_file_vma_batch_add(&vb, vma);
 		}
 		unlink_file_vma_batch_final(&vb);
 
-		free_pgd_range(tlb, addr, vma->vm_end,
-			pg_start, next ? next->vm_start : pg_end);
+		free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
+			       next ? next->vm_start : unmap->pg_end);
 		vma = next;
 	} while (vma);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 042b6b4b6ab86..8771b276d63db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1307,10 +1307,10 @@ void exit_mmap(struct mm_struct *mm)
 	 */
 	mm_flags_set(MMF_OOM_SKIP, mm);
 	mmap_write_lock(mm);
+	unmap.mm_wr_locked = true;
 	mt_clear_in_rcu(&mm->mm_mt);
-	vma_iter_set(&vmi, vma->vm_end);
-	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
-		      USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
+	vma_iter_set(&vmi, unmap.tree_reset);
+	free_pgtables(&tlb, &unmap);
 	tlb_finish_mmu(&tlb);
 
 	/*
diff --git a/mm/vma.c b/mm/vma.c
index 876d2db5329dd..f352d5c722126 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -475,15 +475,13 @@ void remove_vma(struct vm_area_struct *vma)
 void unmap_region(struct unmap_desc *unmap)
 {
 	struct mm_struct *mm = unmap->first->vm_mm;
-	struct ma_state *mas = unmap->mas;
 	struct mmu_gather tlb;
 
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, unmap);
-	mas_set(mas, unmap->tree_reset);
-	free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
-		      unmap->tree_end, unmap->mm_wr_locked);
+	mas_set(unmap->mas, unmap->tree_reset);
+	free_pgtables(&tlb, unmap);
 	tlb_finish_mmu(&tlb);
 }
 
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 0b4918aac8d6d..ca4eb563b29ba 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -1137,11 +1137,10 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
 }
 
-static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-		   struct vm_area_struct *vma, unsigned long floor,
-		   unsigned long ceiling, unsigned long tree_max,
-		   bool mm_wr_locked)
+static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc)
 {
+	(void)tlb;
+	(void)desc;
 }
 
 static inline void mapping_unmap_writable(struct address_space *mapping)
-- 
2.47.3
Re: [PATCH v3 11/11] mm: Use unmap_desc struct for freeing page tables.
Posted by Chris Mason 1 week, 6 days ago
On Wed, 21 Jan 2026 11:49:46 -0500 "Liam R. Howlett" <Liam.Howlett@oracle.com> wrote:

> Pass through the unmap_desc to free_pgtables() because it has almost
> everything necessary and is already on the stack.
> 
> Update testing code as necessary.
> 
> No functional changes intended.
>

Hi everyone,

I'm running some more aggressive prompts through linux-next and this one was
flagged.  Apologies in advance if it's way off:

> diff --git a/mm/memory.c b/mm/memory.c
> --- a/mm/memory.c
> +++ b/mm/memory.c

[ ... ]

> -void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
> -		   struct vm_area_struct *vma, unsigned long pg_start,
> -		   unsigned long pg_end, unsigned long vma_end,
> -		   bool mm_wr_locked)
> +void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
>  {
>  	struct unlink_vma_file_batch vb;
> +	struct ma_state *mas = unmap->mas;
> +	struct vm_area_struct *vma = unmap->first;
>
>  	/*
>  	 * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
> -	 * may be 0.  Underflow is expected in this case.  Otherwise the
> -	 * pagetable end is exclusive.
> -	 * vma_end is exclusive.
> -	 * The last vma address should never be larger than the pagetable end.
> +	 * may be 0.  The underflow here is fine and expected.
> +	 * The vma_end is exclusive, which is fine until we use the mas_ instead
> +	 * of the vma iterators.
> +	 * For freeing the page tables to make sense, the vma_end must be larger
> +	 * than the pg_end, so check that after the potential underflow.
                           ^^^^

The old comment said "should never be larger" but the new comment says
"must be larger".  The WARN_ON_ONCE fires when vma_end > pg_end, so
"must be larger" appears backwards?

> +	 */
> +	WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
                     ^^^^

On the exit_mmap() path, unmap_all_init() sets vma_end = ULONG_MAX while
pg_end = USER_PGTABLES_CEILING.  On ARM, USER_PGTABLES_CEILING is defined
as TASK_SIZE (around 3GB), not zero.

Before this change, exit_mmap() explicitly passed both vma_end and pg_end
as USER_PGTABLES_CEILING, so the WARN_ON_ONCE would never trigger.

After this change, exit_mmap() uses the unmap struct from unmap_all_init()
which sets vma_end = ULONG_MAX but pg_end = USER_PGTABLES_CEILING.
On ARM this means:

  WARN_ON_ONCE(ULONG_MAX - 1 > TASK_SIZE - 1)

This would fire on every process exit on ARM systems.  Is this intended?
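
For reference, my reading of the relevant initialization from earlier in
the series (paraphrased, so the details may be off):

	/* unmap_all_init() effectively leaves, among other things: */
	unmap.pg_start = FIRST_USER_ADDRESS;
	unmap.pg_end   = USER_PGTABLES_CEILING;	/* TASK_SIZE on ARM, 0 by default */
	unmap.vma_end  = ULONG_MAX;		/* covers the whole tree */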

> diff --git a/mm/mmap.c b/mm/mmap.c
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -1307,10 +1307,10 @@ void exit_mmap(struct mm_struct *mm)
>  	 */
>  	mm_flags_set(MMF_OOM_SKIP, mm);
>  	mmap_write_lock(mm);
> +	unmap.mm_wr_locked = true;
>  	mt_clear_in_rcu(&mm->mm_mt);
> -	vma_iter_set(&vmi, vma->vm_end);
> -	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
> -		      USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
> +	vma_iter_set(&vmi, unmap.tree_reset);
> +	free_pgtables(&tlb, &unmap);

The old call passed USER_PGTABLES_CEILING for both pg_end and vma_end.
The new code uses unmap_all_init() which sets pg_end = USER_PGTABLES_CEILING
but vma_end = ULONG_MAX.  This changes the semantics of the WARN_ON_ONCE
check in free_pgtables().
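
If the old behavior was intended, maybe exit_mmap() should clamp it
explicitly?  Something like (untested, just to illustrate):

	unmap.mm_wr_locked = true;
	unmap.vma_end = USER_PGTABLES_CEILING;	/* match the old explicit argument */

But I may be misreading how vma_end and tree_end are meant to differ on
this path.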