The vma_iter_store() function can be used both when adding a new vma and
when updating an existing one. However, for existing ones we do not need
to mark them attached as they are already marked that way. Introduce
vma_iter_store_attached() to be used with already attached vmas.
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/mm.h | 12 ++++++++++++
 mm/vma.c           |  8 ++++----
 mm/vma.h           | 11 +++++++++--
 3 files changed, 25 insertions(+), 6 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2b322871da87..2f805f1a0176 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -821,6 +821,16 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
 	vma_assert_write_locked(vma);
 }
 
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(vma->detached, vma);
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!vma->detached, vma);
+}
+
 static inline void vma_mark_attached(struct vm_area_struct *vma)
 {
 	vma->detached = false;
@@ -866,6 +876,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
 static inline void vma_start_write(struct vm_area_struct *vma) {}
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 		{ mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
 static inline void vma_mark_attached(struct vm_area_struct *vma) {}
 static inline void vma_mark_detached(struct vm_area_struct *vma) {}
diff --git a/mm/vma.c b/mm/vma.c
index d603494e69d7..b9cf552e120c 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge_struct *vmg,
 	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
 
 	if (expanded)
-		vma_iter_store(vmg->vmi, vmg->vma);
+		vma_iter_store_attached(vmg->vmi, vmg->vma);
 
 	if (adj_start) {
 		adjust->vm_start += adj_start;
 		adjust->vm_pgoff += PHYS_PFN(adj_start);
 		if (adj_start < 0) {
 			WARN_ON(expanded);
-			vma_iter_store(vmg->vmi, adjust);
+			vma_iter_store_attached(vmg->vmi, adjust);
 		}
 	}
 
@@ -2845,7 +2845,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_end = address;
 				/* Overwrite old entry in mtree. */
-				vma_iter_store(&vmi, vma);
+				vma_iter_store_attached(&vmi, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
 
 				perf_event_mmap(vma);
@@ -2925,7 +2925,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 				vma->vm_start = address;
 				vma->vm_pgoff -= grow;
 				/* Overwrite old entry in mtree. */
-				vma_iter_store(&vmi, vma);
+				vma_iter_store_attached(&vmi, vma);
 				anon_vma_interval_tree_post_update_vma(vma);
 
 				perf_event_mmap(vma);
diff --git a/mm/vma.h b/mm/vma.h
index 2a2668de8d2c..63dd38d5230c 100644
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -365,9 +365,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
 }
 
 /* Store a VMA with preallocated memory */
-static inline void vma_iter_store(struct vma_iterator *vmi,
-				  struct vm_area_struct *vma)
+static inline void vma_iter_store_attached(struct vma_iterator *vmi,
+					   struct vm_area_struct *vma)
 {
+	vma_assert_attached(vma);
 
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
@@ -390,7 +391,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
 
 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
 	mas_store_prealloc(&vmi->mas, vma);
+}
+
+static inline void vma_iter_store(struct vma_iterator *vmi,
+				  struct vm_area_struct *vma)
+{
 	vma_mark_attached(vma);
+	vma_iter_store_attached(vmi, vma);
 }
 
 static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
--
2.47.1.613.gc27f4b7a9f-goog
On Fri, Jan 10, 2025 at 08:25:51PM -0800, Suren Baghdasaryan wrote:
> The vma_iter_store() function can be used both when adding a new vma and
> when updating an existing one. However, for existing ones we do not need
> to mark them attached as they are already marked that way. Introduce
> vma_iter_store_attached() to be used with already attached vmas.
OK I guess the intent of this is to reinstate the previously existing
asserts, only explicitly checking those places where we attach.
I'm a little concerned that by doing this, somebody might simply invoke
this function without realising the implications.
Can we have something functional like
vma_iter_store_new() and vma_iter_store_overwrite()
?
I don't like us just leaving vma_iter_store() quietly making an assumption
that a caller doesn't necessarily realise.
Also it's more greppable this way.
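Something like this, perhaps (an untested sketch with the hypothetical
names above, layered on the helpers this patch introduces):

	/* Overwrite the tree entry of an already attached VMA. */
	static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
						    struct vm_area_struct *vma)
	{
		vma_assert_attached(vma);
		__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
		mas_store_prealloc(&vmi->mas, vma);
	}

	/* Insert a new, not yet attached VMA. */
	static inline void vma_iter_store_new(struct vma_iterator *vmi,
					      struct vm_area_struct *vma)
	{
		vma_mark_attached(vma);
		vma_iter_store_overwrite(vmi, vma);
	}

That way the new-vs-overwrite intent is explicit at every call site.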
I had a look through callers and it does seem you've snagged them all
correctly.
[snip diffstat and include/linux/mm.h hunks]
> @@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge_struct *vmg,
> 	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
>
> 	if (expanded)
> -		vma_iter_store(vmg->vmi, vmg->vma);
> +		vma_iter_store_attached(vmg->vmi, vmg->vma);
>
> 	if (adj_start) {
> 		adjust->vm_start += adj_start;
> 		adjust->vm_pgoff += PHYS_PFN(adj_start);
> 		if (adj_start < 0) {
> 			WARN_ON(expanded);
> -			vma_iter_store(vmg->vmi, adjust);
> +			vma_iter_store_attached(vmg->vmi, adjust);
> 		}
> 	}
I kind of feel this whole function (that yes, I added :>, though derived
from existing logic) needs rework, as it's necessarily rather confusing.
But hey, that's on me :)
But this does look right... OK see this as a note-to-self...
[snip expand_upwards()/expand_downwards() hunks]
> diff --git a/mm/vma.h b/mm/vma.h
> index 2a2668de8d2c..63dd38d5230c 100644
> --- a/mm/vma.h
> +++ b/mm/vma.h
> @@ -365,9 +365,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
> }
>
> /* Store a VMA with preallocated memory */
> -static inline void vma_iter_store(struct vma_iterator *vmi,
> -				  struct vm_area_struct *vma)
> +static inline void vma_iter_store_attached(struct vma_iterator *vmi,
> +					   struct vm_area_struct *vma)
> {
> +	vma_assert_attached(vma);
>
> #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
> 	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
> @@ -390,7 +391,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
>
> 	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
> 	mas_store_prealloc(&vmi->mas, vma);
> +}
> +
> +static inline void vma_iter_store(struct vma_iterator *vmi,
> +				  struct vm_area_struct *vma)
> +{
> 	vma_mark_attached(vma);
> +	vma_iter_store_attached(vmi, vma);
> }
>
See comment at top, and we need some comments here to explain why we're
going to pains to do this.
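E.g. something along these lines (hypothetical wording, just to show the
kind of comment I mean):

	/*
	 * Store an already attached VMA. Must not be used for a newly
	 * created VMA: attaching won't stay idempotent once the detached
	 * flag is folded into a refcount, so new VMAs have to go through
	 * vma_iter_store(), which marks the VMA attached first.
	 */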
What about mm/nommu.c? I guess these cases are always new VMAs.
We probably definitely need to check this series in a nommu setup, have you
done this? As I can see this breaking things. Then again I suppose you'd
have expected bots to moan by now...
On Mon, Jan 13, 2025 at 3:58 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Fri, Jan 10, 2025 at 08:25:51PM -0800, Suren Baghdasaryan wrote:
> > The vma_iter_store() function can be used both when adding a new vma and
> > when updating an existing one. However, for existing ones we do not need
> > to mark them attached as they are already marked that way. Introduce
> > vma_iter_store_attached() to be used with already attached vmas.
>
> OK I guess the intent of this is to reinstate the previously existing
> asserts, only explicitly checking those places where we attach.
No, the motivation is to prevent re-attaching an already attached vma
or re-detaching an already detached vma for state consistency. I guess
I should amend the description to make that clear.
>
> I'm a little concerned that by doing this, somebody might simply invoke
> this function without realising the implications.
Well, in that case somebody should get an assertion. If
vma_iter_store() is called against an already attached vma, we get this
assertion:

  vma_iter_store()
    vma_mark_attached()
      vma_assert_detached()

If vma_iter_store_attached() is called against a detached vma, we get this one:

  vma_iter_store_attached()
    vma_assert_attached()

Does that address your concern?
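In code, that amounts to (a minimal sketch; the vma_assert_detached()
call in vma_mark_attached() lands later in the series):

	static inline void vma_mark_attached(struct vm_area_struct *vma)
	{
		vma_assert_detached(vma);	/* trips on a double attach */
		vma->detached = false;
	}

	static inline void vma_iter_store_attached(struct vma_iterator *vmi,
						   struct vm_area_struct *vma)
	{
		vma_assert_attached(vma);	/* trips when handed a detached vma */
		/* ... overwrite the entry in the maple tree ... */
	}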
>
> Can we have something functional like
>
> vma_iter_store_new() and vma_iter_store_overwrite()
Ok. A bit more churn but should not be too bad.
>
> ?
>
> I don't like us just leaving vma_iter_store() quietly making an assumption
> that a caller doesn't necessarily realise.
>
> Also it's more greppable this way.
>
> I had a look through callers and it does seem you've snagged them all
> correctly.
[snip quoted patch]
> See comment at top, and we need some comments here to explain why we're
> going to pains to do this.
Ack. I'll amend the patch description to make that clear.
>
> What about mm/nommu.c? I guess these cases are always new VMAs.
CONFIG_PER_VMA_LOCK depends on !CONFIG_NOMMU, so for the nommu case all
these attach/detach functions become NOPs.
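For reference, the dependency is expressed in mm/Kconfig along these
lines (paraphrasing from memory, the exact conditions may differ by
release):

	config PER_VMA_LOCK
		def_bool y
		depends on MMU && SMP && !PREEMPT_RT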
On Mon, Jan 13, 2025 at 08:31:45AM -0800, Suren Baghdasaryan wrote:
> On Mon, Jan 13, 2025 at 3:58 AM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> >
> > On Fri, Jan 10, 2025 at 08:25:51PM -0800, Suren Baghdasaryan wrote:
> > > The vma_iter_store() function can be used both when adding a new vma and
> > > when updating an existing one. However, for existing ones we do not need
> > > to mark them attached as they are already marked that way. Introduce
> > > vma_iter_store_attached() to be used with already attached vmas.
> >
> > OK I guess the intent of this is to reinstate the previously existing
> > asserts, only explicitly checking those places where we attach.
>
> No, the motivation is to prevent re-attaching an already attached vma
> or re-detaching an already detached vma for state consistency. I guess
> I should amend the description to make that clear.
Sorry for noise, missed this reply.
What I mean by this is, in a past iteration of this series I reviewed code
where you did this but did _not_ differentiate between cases of new VMAs
vs. existing, which triggered an assert in your series that I reported.
So I'm saying - now you _are_ differentiating between the two cases.
It's certainly worth belabouring the point of exactly what it is you are
trying to catch here, however! :) So yes please do add a little more to
commit msg that'd be great, thanks!
On Mon, Jan 13, 2025 at 8:48 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Mon, Jan 13, 2025 at 08:31:45AM -0800, Suren Baghdasaryan wrote:
> > On Mon, Jan 13, 2025 at 3:58 AM Lorenzo Stoakes
> > <lorenzo.stoakes@oracle.com> wrote:
> > >
> > > On Fri, Jan 10, 2025 at 08:25:51PM -0800, Suren Baghdasaryan wrote:
> > > > The vma_iter_store() function can be used both when adding a new vma and
> > > > when updating an existing one. However, for existing ones we do not need
> > > > to mark them attached as they are already marked that way. Introduce
> > > > vma_iter_store_attached() to be used with already attached vmas.
> > >
> > > OK I guess the intent of this is to reinstate the previously existing
> > > asserts, only explicitly checking those places where we attach.
> >
> > No, the motivation is to prevent re-attaching an already attached vma
> > or re-detaching an already detached vma for state consistency. I guess
> > I should amend the description to make that clear.
>
> Sorry for noise, missed this reply.
>
> What I mean by this is, in a past iteration of this series I reviewed code
> where you did this but did _not_ differentiate between cases of new VMAs
> vs. existing, which triggered an assert in your series that I reported.
>
> So I'm saying - now you _are_ differentiating between the two cases.
>
> It's certainly worth belabouring the point of exactly what it is you are
> trying to catch here, however! :) So yes please do add a little more to
> commit msg that'd be great, thanks!
Sure. How about:
With vma->detached being a separate flag, double-marking a vma as
attached or detached is not an issue because the flag will simply be
overwritten with the same value. However, once we fold this flag into
the refcount later in this series, re-attaching or re-detaching a vma
becomes an issue since these operations will be
incrementing/decrementing a refcount. Fix the places where we
currently re-attach a vma during vma update and add assertions in
vma_mark_attached()/vma_mark_detached() to catch invalid usage.
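To illustrate the hazard (a rough sketch; vm_refcnt is a hypothetical
name here, the actual encoding comes later in the series):

	/* Today: a boolean, so double-marking is harmlessly idempotent. */
	vma->detached = false;

	/*
	 * After the fold: attaching becomes a counter operation, so a
	 * second vma_mark_attached() on the same vma would take an extra
	 * reference that nothing ever drops.
	 */
	refcount_inc(&vma->vm_refcnt);	/* hypothetical field */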
On Mon, Jan 13, 2025 at 11:09:25AM -0800, Suren Baghdasaryan wrote:
> On Mon, Jan 13, 2025 at 8:48 AM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
[snip]
> Sure. How about:
>
> With vma->detached being a separate flag, double-marking a vma as
> attached or detached is not an issue because the flag will simply be
> overwritten with the same value. However, once we fold this flag into
> the refcount later in this series, re-attaching or re-detaching a vma
> becomes an issue since these operations will be
> incrementing/decrementing a refcount. Fix the places where we
> currently re-attach a vma during vma update and add assertions in
> vma_mark_attached()/vma_mark_detached() to catch invalid usage.
That's awesome, thanks!
On Mon, Jan 13, 2025 at 08:31:45AM -0800, Suren Baghdasaryan wrote:
> On Mon, Jan 13, 2025 at 3:58 AM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> >
> > On Fri, Jan 10, 2025 at 08:25:51PM -0800, Suren Baghdasaryan wrote:
> > > The vma_iter_store() function can be used both when adding a new vma and
> > > when updating an existing one. However, for existing ones we do not need
> > > to mark them attached as they are already marked that way. Introduce
> > > vma_iter_store_attached() to be used with already attached vmas.
> >
> > OK I guess the intent of this is to reinstate the previously existing
> > asserts, only explicitly checking those places where we attach.
>
> No, the motivation is to prevent re-attaching an already attached vma
> or re-detaching an already detached vma for state consistency. I guess
> I should amend the description to make that clear.
>
> >
> > I'm a little concerned that by doing this, somebody might simply invoke
> > this function without realising the implications.
>
> Well, in that case somebody should get an assertion. If
> vma_iter_store() is called against an already attached vma, we get this
> assertion:
>
>   vma_iter_store()
>     vma_mark_attached()
>       vma_assert_detached()
>
> If vma_iter_store_attached() is called against a detached vma, we get this one:
>
>   vma_iter_store_attached()
>     vma_assert_attached()
>
> Does that address your concern?
Well, the issue is that you might only get that assertion in some code path
that isn't immediately exercised by code the bots run :)
See my comment re: testing to 00/17 (though I absolutely accept testing
this is a giant pain).
But yes we are protected in cases where a user has CONFIG_DEBUG_VM turned
on.
I honestly wonder if we need to be stronger than that though; it's really
serious if we do this wrong, isn't it? Maybe it should be a WARN_ON_ONCE()
or something?
In any case, a rename means somebody isn't going to do this by mistake.
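E.g. something like (an untested sketch):

	static inline void vma_assert_attached(struct vm_area_struct *vma)
	{
		/*
		 * Unlike VM_BUG_ON_VMA(), this stays compiled in without
		 * CONFIG_DEBUG_VM, and warns (once per call site) instead
		 * of taking the machine down.
		 */
		WARN_ON_ONCE(vma->detached);
	}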
>
> >
> > Can we have something functional like
> >
> > vma_iter_store_new() and vma_iter_store_overwrite()
>
> Ok. A bit more churn but should not be too bad.
Yeah sorry for churn (though hey - I _am_ the churn king right? ;) - but I
think in this case it's really valuable for understanding.
[snip quoted patch]
> >
> > See comment at top, and we need some comments here to explain why we're
> > going to pains to do this.
>
> Ack. I'll amend the patch description to make that clear.
Thanks!
>
> >
> > What about mm/nommu.c? I guess these cases are always new VMAs.
>
> CONFIG_PER_VMA_LOCK depends on !CONFIG_NOMMU, so for the nommu case all
> these attach/detach functions become NOPs.
Ack. OK good, I usually like to pretend nommu doesn't exist, but sometimes
have to ack that it does; now in this case I can go back to not
caring... :>)