The ceiling and tree search limit need to be different arguments in
preparation for a future change to the handling of a failed fork attempt.

No functional changes intended.

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 mm/internal.h | 4 +++-
 mm/memory.c   | 7 ++++---
 mm/mmap.c     | 2 +-
 mm/vma.c      | 3 ++-
 4 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 45b725c3dc030..f9a278ac76d83 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -444,7 +444,9 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		   struct vm_area_struct *start_vma, unsigned long floor,
-		   unsigned long ceiling, bool mm_wr_locked);
+		   unsigned long ceiling, unsigned long tree_max,
+		   bool mm_wr_locked);
+
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
diff --git a/mm/memory.c b/mm/memory.c
index 0ba4f6b718471..3346514562bba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -371,7 +371,8 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 		struct vm_area_struct *vma, unsigned long floor,
-		unsigned long ceiling, bool mm_wr_locked)
+		unsigned long ceiling, unsigned long tree_max,
+		bool mm_wr_locked)
 {
 	struct unlink_vma_file_batch vb;
 
@@ -385,7 +386,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 	 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
 	 * be 0. This will underflow and is okay.
 	 */
-	next = mas_find(mas, ceiling - 1);
+	next = mas_find(mas, tree_max - 1);
 	if (unlikely(xa_is_zero(next)))
 		next = NULL;
 
@@ -405,7 +406,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 	 */
 	while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
 		vma = next;
-		next = mas_find(mas, ceiling - 1);
+		next = mas_find(mas, tree_max - 1);
 		if (unlikely(xa_is_zero(next)))
 			next = NULL;
 		if (mm_wr_locked)
diff --git a/mm/mmap.c b/mm/mmap.c
index 0995a48b46d59..eba2bc81bc749 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1311,7 +1311,7 @@ void exit_mmap(struct mm_struct *mm)
 	mt_clear_in_rcu(&mm->mm_mt);
 	vma_iter_set(&vmi, vma->vm_end);
 	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
-		      USER_PGTABLES_CEILING, true);
+		      USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
 	tlb_finish_mmu(&tlb);
 
 	/*
diff --git a/mm/vma.c b/mm/vma.c
index fd270345c25d3..aa75ca8618609 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -486,6 +486,7 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 		   /* mm_wr_locked = */ true);
 	mas_set(mas, vma->vm_end);
 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+		      next ? next->vm_start : USER_PGTABLES_CEILING,
 		      next ? next->vm_start : USER_PGTABLES_CEILING,
 		      /* mm_wr_locked = */ true);
 	tlb_finish_mmu(&tlb);
@@ -1232,7 +1233,7 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
 	mas_set(mas_detach, 1);
 	/* start and end may be different if there is no prev or next vma. */
 	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
-		      vms->unmap_end, mm_wr_locked);
+		      vms->unmap_end, vms->unmap_end, mm_wr_locked);
 	tlb_finish_mmu(&tlb);
 	vms->clear_ptes = false;
 }
--
2.47.2
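
A side note on the mas_find(mas, tree_max - 1) calls above (and the
ceiling - 1 they replace): as the in-hunk comment says, the value may be
0, and the subtraction is deliberately allowed to wrap. A minimal
standalone C illustration of that arithmetic (this models the semantics
only; the names are illustrative, not kernel code):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		/* USER_PGTABLES_CEILING is 0 on some configurations. */
		unsigned long ceiling = 0;

		/* Unsigned wrap-around: 0 - 1 == ULONG_MAX. */
		unsigned long limit = ceiling - 1;

		/* A search bounded by ULONG_MAX is effectively unbounded. */
		printf("%d\n", limit == ULONG_MAX);	/* prints 1 */
		return 0;
	}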
On Fri, Aug 15, 2025 at 03:10:29PM -0400, Liam R. Howlett wrote:
> The ceiling and tree search limit need to be different arguments in
> preparation for a future change to the handling of a failed fork attempt.
>
> No functional changes intended.
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>

(Obv. in addition to the comment about broken VMA tests :P)

I guess the intent is that if we discover any page tables beyond tree_max
then we ought to just wipe them all out so, in effect, we don't consider
mappings at or past tree_max to be valid?

I feel like we need a comment to this effect, as this is confusing as it
stands.

Could we add a kerneldoc comment for free_pgtables() spelling this out?

> @@ -385,7 +386,7 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
>  	 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
>  	 * be 0. This will underflow and is okay.
>  	 */
> -	next = mas_find(mas, ceiling - 1);
> +	next = mas_find(mas, tree_max - 1);

Do we need to put some sort of sanity check in to make sure tree_max <=
ceiling? (Though the 0 case is a pain... so I guess tree_max - 1 <=
ceiling - 1?)
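
A minimal sketch of the check being suggested, assuming VM_WARN_ON_ONCE
and placement at the top of free_pgtables() (neither of which the thread
settles on):

	/*
	 * tree_max must not exceed ceiling. Both may legitimately be 0
	 * (USER_PGTABLES_CEILING) and wrap on subtraction, so compare
	 * the decremented values, per the suggestion above.
	 */
	VM_WARN_ON_ONCE(tree_max - 1 > ceiling - 1);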
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250819 15:14]:
> I guess the intent is that if we discover any page tables beyond tree_max
> then we ought to just wipe them all out so, in effect, we don't consider
> mappings at or past tree_max to be valid?

Actually... there are some archs that map outside the vma and they are
valid. I think mips? And I think lower, but yeah, it's needed. This is
why prev->vm_end and next->vm_start are used as page table limits, afaik.
This is a serious annoyance because it frequently adds walks of the vma
tree that are only infrequently necessary.

> I feel like we need a comment to this effect, as this is confusing as it
> stands.
>
> Could we add a kerneldoc comment for free_pgtables() spelling this out?

I'll add a note here, but confusion will probably increase. I'll add a
note about the tree max as well.

> Do we need to put some sort of sanity check in to make sure tree_max <=
> ceiling? (Though the 0 case is a pain... so I guess tree_max - 1 <=
> ceiling - 1?)

Sure!
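
For illustration, the note being promised could take the shape of a
kerneldoc comment roughly like the following (a sketch only; the
parameter wording is assumed here, not quoted from any later revision):

	/**
	 * free_pgtables - free the page tables for a range of VMAs
	 * @tlb:          the mmu_gather for the page table freeing
	 * @mas:          maple tree state, positioned at @vma
	 * @vma:          the first VMA whose page tables are freed
	 * @floor:        the lowest address at which page tables may be freed
	 * @ceiling:      the highest address at which page tables may be freed
	 * @tree_max:     the upper limit for the VMA tree search; may differ
	 *                from @ceiling because some architectures keep valid
	 *                page table entries outside any VMA
	 * @mm_wr_locked: true if the mmap write lock is held
	 */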
On Wed, Sep 03, 2025 at 04:19:04PM -0400, Liam R. Howlett wrote:
> Actually... there are some archs that map outside the vma and they are
> valid. I think mips? And I think lower, but yeah, it's needed. This is
> why prev->vm_end and next->vm_start are used as page table limits, afaik.
> This is a serious annoyance because it frequently adds walks of the vma
> tree that are only infrequently necessary.

Ugh, god. I was vaguely aware of this but that's gross. Very gross. I
need to document all the VMA weirdnesses somewhere.

What do they do this for? Guard pages or something?

> I'll add a note here, but confusion will probably increase. I'll add a
> note about the tree max as well.

Thanks!

> > Do we need to put some sort of sanity check in to make sure tree_max <=
> > ceiling? (Though the 0 case is a pain... so I guess tree_max - 1 <=
> > ceiling - 1?)
>
> Sure!

Thanks!
On 03.09.25 22:19, Liam R. Howlett wrote:
> Actually... there are some archs that map outside the vma and they are
> valid. I think mips? And I think lower, but yeah, it's needed. This is
> why prev->vm_end and next->vm_start are used as page table limits, afaik.
> This is a serious annoyance because it frequently adds walks of the vma
> tree that are only infrequently necessary.

Hm, does that still exist?

I recall something odd ... was it that gate area thingy (in_gate_area) we
also have to handle in GUP code? That is x86/arm though, not mips.

--
Cheers

David / dhildenb
* David Hildenbrand <david@redhat.com> [250904 06:21]:
> Hm, does that still exist?

I think it does? arch/mips/mm/fault.c still checks for addresses between
VMALLOC_START and VMALLOC_END, as well as MODULES_VADDR and MODULES_END,
and (potentially, depending on CONFIG) jumps to vmalloc_fault.

I tried to find the statement about the start/end going to the next/prev
vma that I came across before, but I cannot. It may have been in a git
log for something else entirely.

> I recall something odd ... was it that gate area thingy (in_gate_area) we
> also have to handle in GUP code? That is x86/arm though, not mips.

IIRC, the gate area is to do with vdso/vvars, so going to the maximum
allowed pte would unmap it even though it's not in the vma tree - which
it is not. This is true for most platforms. But if that's the case, then
unmapping the last vma in the tree would cause the vdso to no longer
work - which doesn't make sense to me?

I'm not sure if any platform maps them at a low value so that
"prev->vm_start or 0" makes sense, but I would not be surprised.

Thanks,
Liam
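
The arch/mips/mm/fault.c check referred to above has roughly this shape
(a paraphrase from the description in the thread, not a verbatim quote;
the helper name is hypothetical and the modules range is config-dependent
upstream):

	/* Hypothetical helper paraphrasing the mips fault-path check. */
	static bool fault_in_kernel_mapping(unsigned long address)
	{
		/* vmalloc range faults are handled without any VMA. */
		if (address >= VMALLOC_START && address <= VMALLOC_END)
			return true;
	#ifdef MODULES_VADDR
		/* Ditto for the module mapping range, where configured. */
		if (address >= MODULES_VADDR && address <= MODULES_END)
			return true;
	#endif
		return false;
	}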
On Thu, Sep 04, 2025 at 12:20:55PM +0200, David Hildenbrand wrote:
> I recall something odd ... was it that gate area thingy (in_gate_area) we
> also have to handle in GUP code? That is x86/arm though, not mips.

Isn't the gate area the VSYSCALL area, so basically a kernel mapping
address that we allow userland to access?
I know this is an RFC but, to be mildly annoying: you need to also update
tools/testing/vma/vma_internal.h to reflect this, as this causes those
tests to break.

On Fri, Aug 15, 2025 at 03:10:29PM -0400, Liam R. Howlett wrote:
> The ceiling and tree search limit need to be different arguments in
> preparation for a future change to the handling of a failed fork attempt.
>
> No functional changes intended.
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
* Lorenzo Stoakes <lorenzo.stoakes@oracle.com> [250818 11:36]:
> I know this is an RFC but, to be mildly annoying: you need to also update
> tools/testing/vma/vma_internal.h to reflect this, as this causes those
> tests to break.

Sounds good. I did check that, but on an earlier version with another vma
testing fix. I'll check again before sending out another version.

Thanks,
Liam