The vma_iter_store() functions can be used both when adding a new vma
and when updating an existing one. However, for existing ones we do not
need to mark them attached, as they are already marked that way. Add a
parameter to distinguish the two usages and skip vma_mark_attached()
when it is not needed.
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
include/linux/mm.h | 12 ++++++++++++
mm/nommu.c         |  4 ++--
mm/vma.c           | 16 ++++++++--------
mm/vma.h           | 13 +++++++++----
4 files changed, 31 insertions(+), 14 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 081178b0eec4..c50edfedd99d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,6 +821,16 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
vma_assert_write_locked(vma);
}
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(vma->detached, vma);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(!vma->detached, vma);
+}
+
static inline void vma_mark_attached(struct vm_area_struct *vma)
{
vma->detached = false;
@@ -866,6 +876,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{ mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
static inline void vma_mark_attached(struct vm_area_struct *vma) {}
static inline void vma_mark_detached(struct vm_area_struct *vma) {}
diff --git a/mm/nommu.c b/mm/nommu.c
index 9cb6e99215e2..72c8c505836c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1191,7 +1191,7 @@ unsigned long do_mmap(struct file *file,
setup_vma_to_mm(vma, current->mm);
current->mm->map_count++;
/* add the VMA to the tree */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, true);
/* we flush the region from the icache only when the first executable
* mapping of it is made */
@@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
setup_vma_to_mm(vma, mm);
setup_vma_to_mm(new, mm);
- vma_iter_store(vmi, new);
+ vma_iter_store(vmi, new, true);
mm->map_count++;
return 0;
diff --git a/mm/vma.c b/mm/vma.c
index 476146c25283..ce113dd8c471 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -306,7 +306,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- vma_iter_store(vmi, vp->insert);
+ vma_iter_store(vmi, vp->insert, true);
mm->map_count++;
}
@@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge_struct *vmg,
vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
if (expanded)
- vma_iter_store(vmg->vmi, vmg->vma);
+ vma_iter_store(vmg->vmi, vmg->vma, false);
if (adj_start) {
adjust->vm_start += adj_start;
adjust->vm_pgoff += PHYS_PFN(adj_start);
if (adj_start < 0) {
WARN_ON(expanded);
- vma_iter_store(vmg->vmi, adjust);
+ vma_iter_store(vmg->vmi, adjust, false);
}
}
@@ -1689,7 +1689,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
return -ENOMEM;
vma_start_write(vma);
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, true);
vma_link_file(vma);
mm->map_count++;
validate_mm(mm);
@@ -2368,7 +2368,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
/* Lock the VMA since it is modified after insertion into VMA tree */
vma_start_write(vma);
- vma_iter_store(vmi, vma);
+ vma_iter_store(vmi, vma, true);
map->mm->map_count++;
vma_link_file(vma);
@@ -2542,7 +2542,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vm_flags_init(vma, flags);
vma->vm_page_prot = vm_get_page_prot(flags);
vma_start_write(vma);
- if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
+ if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL, true))
goto mas_store_fail;
mm->map_count++;
@@ -2785,7 +2785,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, false);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
@@ -2865,7 +2865,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
vma->vm_start = address;
vma->vm_pgoff -= grow;
/* Overwrite old entry in mtree. */
- vma_iter_store(&vmi, vma);
+ vma_iter_store(&vmi, vma, false);
anon_vma_interval_tree_post_update_vma(vma);
perf_event_mmap(vma);
diff --git a/mm/vma.h b/mm/vma.h
index 24636a2b0acf..18c9e49b1eae 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -145,7 +145,7 @@ __must_check int vma_shrink(struct vma_iterator *vmi,
unsigned long start, unsigned long end, pgoff_t pgoff);
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
- struct vm_area_struct *vma, gfp_t gfp)
+ struct vm_area_struct *vma, gfp_t gfp, bool new_vma)
{
if (vmi->mas.status != ma_start &&
@@ -157,7 +157,10 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
if (unlikely(mas_is_err(&vmi->mas)))
return -ENOMEM;
- vma_mark_attached(vma);
+ if (new_vma)
+ vma_mark_attached(vma);
+ vma_assert_attached(vma);
+
return 0;
}
@@ -366,7 +369,7 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
/* Store a VMA with preallocated memory */
static inline void vma_iter_store(struct vma_iterator *vmi,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma, bool new_vma)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
@@ -390,7 +393,9 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
mas_store_prealloc(&vmi->mas, vma);
- vma_mark_attached(vma);
+ if (new_vma)
+ vma_mark_attached(vma);
+ vma_assert_attached(vma);
}
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
--
2.47.1.613.gc27f4b7a9f-goog
* Suren Baghdasaryan <surenb@google.com> [241226 12:07]:
> The vma_iter_store() functions can be used both when adding a new vma
> and when updating an existing one. However, for existing ones we do not
> need to mark them attached, as they are already marked that way. Add a
> parameter to distinguish the two usages and skip vma_mark_attached()
> when it is not needed.
I really don't like boolean flags - especially for such a small function.
The passing of flags complicates things and is not self-documenting. Can
we make a new vma_iter_store_attach() that just calls vma_iter_store()
and then does the attach?
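
For illustration, a minimal sketch of that wrapper idea might look like
the following (the name vma_iter_store_attach() and its placement are
hypothetical, and it assumes vma_iter_store() keeps its original
two-argument signature with the attach split out):

/*
 * Hypothetical sketch only: keep vma_iter_store() flag-free and let a
 * thin wrapper perform the attach for VMAs newly inserted into the tree.
 */
static inline void vma_iter_store_attach(struct vma_iterator *vmi,
					 struct vm_area_struct *vma)
{
	vma_iter_store(vmi, vma);	/* plain store, no state change */
	vma_mark_attached(vma);		/* only needed for new VMAs */
}

Callers inserting a new vma would use the wrapper, while callers that
overwrite an existing entry (e.g. expand_upwards()) would keep calling
vma_iter_store() directly.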
On Tue, Jan 7, 2025 at 8:50 AM Liam R. Howlett <Liam.Howlett@oracle.com> wrote:
>
> * Suren Baghdasaryan <surenb@google.com> [241226 12:07]:
> > The vma_iter_store() functions can be used both when adding a new vma
> > and when updating an existing one. However, for existing ones we do not
> > need to mark them attached, as they are already marked that way. Add a
> > parameter to distinguish the two usages and skip vma_mark_attached()
> > when it is not needed.
>
> I really don't like boolean flags - especially for such a small function.
>
> The passing of flags complicates things and is not self-documenting. Can
> we make a new vma_iter_store_attach() that just calls vma_iter_store()
> and then does the attach?
Sure, I'll do that. Thanks for the feedback!
On 12/26/24 18:06, Suren Baghdasaryan wrote:
> The vma_iter_store() functions can be used both when adding a new vma
> and when updating an existing one. However, for existing ones we do not
> need to mark them attached, as they are already marked that way. Add a
> parameter to distinguish the two usages and skip vma_mark_attached()
> when it is not needed.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Seems like overkill? It looks like the common case is the "true" case, so
add a variant for the false case? Also, it seems no _store_gfp caller uses
false, so that parameter doesn't need to exist there anyway?
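
For illustration, a minimal sketch of that split might look like the
following (the names vma_iter_store_new() and vma_iter_store_overwrite()
are hypothetical, and the CONFIG_DEBUG_VM_MAPLE_TREE checks from the
current vma_iter_store() are omitted for brevity):

/*
 * Hypothetical sketch only: make the common "new vma" case the explicit
 * default and give updates of an already-attached vma their own variant,
 * so no boolean flag is needed at the call sites.
 */
static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_assert_detached(vma);	/* must not be in the tree yet */
	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
	vma_mark_attached(vma);
}

static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);	/* already marked by the first store */
	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

vma_iter_store_gfp() could then drop the extra parameter entirely and
keep attaching unconditionally, since its only caller in this patch
(do_brk_flags()) inserts a new vma.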