[PATCH v1 5/9] mm/vma: Add page table limit to unmap_region()

Posted by Liam R. Howlett 3 weeks, 2 days ago
The unmap_region() calls need to pass through the page table limit for a
future patch.

No functional changes intended.

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/vma.c | 5 +++--
 mm/vma.h | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/vma.c b/mm/vma.c
index 1bae142bbc0f1..4c850ffd83a4b 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -474,7 +474,7 @@ void remove_vma(struct vm_area_struct *vma)
  * Called with the mm semaphore held.
  */
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long vma_min, unsigned long vma_max,
+		unsigned long vma_min, unsigned long vma_max, unsigned long pg_max,
 		struct vm_area_struct *prev, struct vm_area_struct *next)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -487,7 +487,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 	mas_set(mas, vma->vm_end);
 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      next ? next->vm_start : USER_PGTABLES_CEILING,
-		      next ? next->vm_start : USER_PGTABLES_CEILING,
+		      pg_max,
 		      /* mm_wr_locked = */ true);
 	tlb_finish_mmu(&tlb);
 }
@@ -2420,6 +2420,7 @@ static int __mmap_new_file_vma(struct mmap_state *map,
 		vma_iter_set(vmi, vma->vm_end);
 		/* Undo any partial mapping done by a device driver. */
 		unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
+			     map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
 			     map->prev, map->next);
 
 		return error;
diff --git a/mm/vma.h b/mm/vma.h
index a9d0cef684ddb..b0ebc81d5862e 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -261,7 +261,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 void remove_vma(struct vm_area_struct *vma);
 
 void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
-		unsigned long min, unsigned long max,
+		unsigned long min, unsigned long max, unsigned long pg_max,
 		struct vm_area_struct *prev, struct vm_area_struct *next);
 
 /* We are about to modify the VMA's flags. */
-- 
2.47.2
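
For quick reference, the prototype that results from applying this patch, reconstructed from the mm/vma.h hunk above (the names min/max/pg_max are taken verbatim from the header), reads:

	void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
			unsigned long min, unsigned long max, unsigned long pg_max,
			struct vm_area_struct *prev, struct vm_area_struct *next);

The only caller updated here, __mmap_new_file_vma(), bounds pg_max by the start of the next VMA, falling back to USER_PGTABLES_CEILING when there is no next VMA.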
Re: [PATCH v1 5/9] mm/vma: Add page table limit to unmap_region()
Posted by Pedro Falcato 3 weeks, 1 day ago
On Tue, Sep 09, 2025 at 03:09:41PM -0400, Liam R. Howlett wrote:
> The unmap_region() calls need to pass through the page table limit for a
> future patch.
> 
> No functional changes intended.
> 
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>

Reviewed-by: Pedro Falcato <pfalcato@suse.de>

-- 
Pedro
Re: [PATCH v1 5/9] mm/vma: Add page table limit to unmap_region()
Posted by Suren Baghdasaryan 3 weeks, 2 days ago
On Tue, Sep 9, 2025 at 12:10 PM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> The unmap_region() calls need to pass through the page table limit for a
> future patch.
>
> No functional changes intended.
>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
>  mm/vma.c | 5 +++--
>  mm/vma.h | 2 +-
>  2 files changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/mm/vma.c b/mm/vma.c
> index 1bae142bbc0f1..4c850ffd83a4b 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -474,7 +474,7 @@ void remove_vma(struct vm_area_struct *vma)
>   * Called with the mm semaphore held.
>   */
>  void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
> -               unsigned long vma_min, unsigned long vma_max,
> +               unsigned long vma_min, unsigned long vma_max, unsigned long pg_max,
>                 struct vm_area_struct *prev, struct vm_area_struct *next)
>  {
>         struct mm_struct *mm = vma->vm_mm;
> @@ -487,7 +487,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
>         mas_set(mas, vma->vm_end);
>         free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
>                       next ? next->vm_start : USER_PGTABLES_CEILING,
> -                     next ? next->vm_start : USER_PGTABLES_CEILING,
> +                     pg_max,

Hmm. The free_pgtables() parameters were advertised as:

@floor: The lowest page table address
@ceiling: The highest page table address
@tree_max: The highest tree search address

but here tree_max=pg_max. I would expect pg_max to mean "The highest
page table address", IOW we should have ceiling=pg_max. Either the
order of the parameters is wrong here or the names are misleading.
I also think in the previous patch we should have renamed
free_pgtables() parameters to vma_min, vma_max and pg_max like here
for consistency and to avoid confusion.
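
To make the concern concrete, here is the call as it stands after this patch, annotated with the parameter names documented for free_pgtables() in the previous patch; the annotations are illustrative and not part of the diff:

	free_pgtables(&tlb, mas, vma,
		      prev ? prev->vm_end : FIRST_USER_ADDRESS,      /* @floor */
		      next ? next->vm_start : USER_PGTABLES_CEILING, /* @ceiling */
		      pg_max,                                         /* @tree_max */
		      /* mm_wr_locked = */ true);

i.e. pg_max is passed in the @tree_max slot, while @ceiling keeps its existing next->vm_start / USER_PGTABLES_CEILING value.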

>                       /* mm_wr_locked = */ true);
>         tlb_finish_mmu(&tlb);
>  }
> @@ -2420,6 +2420,7 @@ static int __mmap_new_file_vma(struct mmap_state *map,
>                 vma_iter_set(vmi, vma->vm_end);
>                 /* Undo any partial mapping done by a device driver. */
>                 unmap_region(&vmi->mas, vma, vma->vm_start, vma->vm_end,
> +                            map->next ? map->next->vm_start : USER_PGTABLES_CEILING,
>                              map->prev, map->next);
>
>                 return error;
> diff --git a/mm/vma.h b/mm/vma.h
> index a9d0cef684ddb..b0ebc81d5862e 100644
> --- a/mm/vma.h
> +++ b/mm/vma.h
> @@ -261,7 +261,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
>  void remove_vma(struct vm_area_struct *vma);
>
>  void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
> -               unsigned long min, unsigned long max,
> +               unsigned long min, unsigned long max, unsigned long pg_max,
>                 struct vm_area_struct *prev, struct vm_area_struct *next);
>
>  /* We are about to modify the VMA's flags. */
> --
> 2.47.2
>