We don't actually need to return an output parameter providing mm sequence
number, rather we can separate that out into another function -
__vma_raw_mm_seqnum() - and have any callers which need to obtain that
invoke that instead.
The access to the raw sequence number requires that we hold the exclusive
mmap lock such that we know we can't race vma_end_write_all(), so move the
assert to __vma_raw_mm_seqnum() to make this requirement clear.
Also while we're here, convert all of the VM_BUG_ON_VMA()'s to
VM_WARN_ON_ONCE_VMA()'s in line with the convention that we do not invoke
oopses when we can avoid it.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 678f90080fa6..23bde4bd5a85 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
vma_refcount_put(vma);
}
-/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
{
+ const struct mm_struct *mm = vma->vm_mm;
+
+ /* We must hold an exclusive write lock for this access to be valid. */
mmap_assert_write_locked(vma->vm_mm);
+ return mm->mm_lock_seq.sequence;
+}
+/*
+ * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
+ * write lock is held.
+ *
+ * Returns true if write-locked, otherwise false.
+ *
+ * Note that mm_lock_seq is updated only if the VMA is NOT write-locked.
+ */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
+{
/*
* current task is holding mmap_write_lock, both vma->vm_lock_seq and
* mm->mm_lock_seq can't be concurrently modified.
*/
- *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
- return (vma->vm_lock_seq == *mm_lock_seq);
+ return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
}
int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
@@ -281,12 +294,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
*/
static inline void vma_start_write(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return;
- __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+ __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_UNINTERRUPTIBLE);
}
/**
@@ -305,30 +316,25 @@ static inline void vma_start_write(struct vm_area_struct *vma)
static inline __must_check
int vma_start_write_killable(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- if (__is_vma_write_locked(vma, &mm_lock_seq))
+ if (__is_vma_write_locked(vma))
return 0;
- return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+
+ return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
}
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
-
/*
* See the comment describing the vm_area_struct->vm_refcnt field for
* details of possible refcnt values.
*/
- VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
- !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ VM_WARN_ON_ONCE_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
+ !__is_vma_write_locked(vma), vma);
}
static inline bool vma_is_attached(struct vm_area_struct *vma)
--
2.52.0
Hi Andrew,
Could you apply the attached fix-patch to address Vlasta's nits? Thanks! :)
Cheers, Lorenzo
----8<----
From d6f05f5f0c2ada298e90deaaf3eefdcabc4d344a Mon Sep 17 00:00:00 2001
From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Date: Mon, 26 Jan 2026 16:23:16 +0000
Subject: [PATCH] fix
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/mmap_lock.h | 9 +++------
mm/mmap_lock.c | 6 +++---
2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 23bde4bd5a85..1746a172a81c 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -272,8 +272,6 @@ static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
* write lock is held.
*
* Returns true if write-locked, otherwise false.
- *
- * Note that mm_lock_seq is updated only if the VMA is NOT write-locked.
*/
static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
{
@@ -284,8 +282,7 @@ static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
}
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
- int state);
+int __vma_start_write(struct vm_area_struct *vma, int state);
/*
* Begin writing to a VMA.
@@ -297,7 +294,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
if (__is_vma_write_locked(vma))
return;
- __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_UNINTERRUPTIBLE);
+ __vma_start_write(vma, TASK_UNINTERRUPTIBLE);
}
/**
@@ -319,7 +316,7 @@ int vma_start_write_killable(struct vm_area_struct *vma)
if (__is_vma_write_locked(vma))
return 0;
- return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
+ return __vma_start_write(vma, TASK_KILLABLE);
}
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index b523a3fe110c..a9ad6a573270 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -136,14 +136,14 @@ static int __vma_start_exclude_readers(struct vma_exclude_readers_state *ves)
return 0;
}
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
- int state)
+int __vma_start_write(struct vm_area_struct *vma, int state)
{
- int err;
+ const unsigned int mm_lock_seq = __vma_raw_mm_seqnum(vma);
struct vma_exclude_readers_state ves = {
.vma = vma,
.state = state,
};
+ int err;
err = __vma_start_exclude_readers(&ves);
if (err) {
--
2.52.0
On 1/23/26 21:12, Lorenzo Stoakes wrote:
> We don't actually need to return an output parameter providing mm sequence
> number, rather we can separate that out into another function -
> __vma_raw_mm_seqnum() - and have any callers which need to obtain that
> invoke that instead.
>
> The access to the raw sequence number requires that we hold the exclusive
> mmap lock such that we know we can't race vma_end_write_all(), so move the
> assert to __vma_raw_mm_seqnum() to make this requirement clear.
>
> Also while we're here, convert all of the VM_BUG_ON_VMA()'s to
> VM_WARN_ON_ONCE_VMA()'s in line with the convention that we do not invoke
> oopses when we can avoid it.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Few nits:
> ---
> include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
> 1 file changed, 25 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 678f90080fa6..23bde4bd5a85 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
> vma_refcount_put(vma);
> }
>
> -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
> -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
> +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
> {
> + const struct mm_struct *mm = vma->vm_mm;
> +
> + /* We must hold an exclusive write lock for this access to be valid. */
> mmap_assert_write_locked(vma->vm_mm);
> + return mm->mm_lock_seq.sequence;
> +}
>
> +/*
> + * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
> + * write lock is held.
> + *
> + * Returns true if write-locked, otherwise false.
> + *
> + * Note that mm_lock_seq is updated only if the VMA is NOT write-locked.
This line is no longer applicable.
> + */
> +static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
> +{
> /*
> * current task is holding mmap_write_lock, both vma->vm_lock_seq and
> * mm->mm_lock_seq can't be concurrently modified.
> */
> - *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
> - return (vma->vm_lock_seq == *mm_lock_seq);
> + return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
> }
>
> int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> @@ -281,12 +294,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> */
> static inline void vma_start_write(struct vm_area_struct *vma)
> {
> - unsigned int mm_lock_seq;
> -
> - if (__is_vma_write_locked(vma, &mm_lock_seq))
> + if (__is_vma_write_locked(vma))
> return;
>
> - __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
> + __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_UNINTERRUPTIBLE);
At this point I think __vma_start_write() could just perform
__vma_raw_mm_seqnum() itself and we can remove the param.
It could possibly make the inline code smaller.
On Mon, Jan 26, 2026 at 12:30:04PM +0100, Vlastimil Babka wrote:
> On 1/23/26 21:12, Lorenzo Stoakes wrote:
> > We don't actually need to return an output parameter providing mm sequence
> > number, rather we can separate that out into another function -
> > __vma_raw_mm_seqnum() - and have any callers which need to obtain that
> > invoke that instead.
> >
> > The access to the raw sequence number requires that we hold the exclusive
> > mmap lock such that we know we can't race vma_end_write_all(), so move the
> > assert to __vma_raw_mm_seqnum() to make this requirement clear.
> >
> > Also while we're here, convert all of the VM_BUG_ON_VMA()'s to
> > VM_WARN_ON_ONCE_VMA()'s in line with the convention that we do not invoke
> > oopses when we can avoid it.
> >
> > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Thanks!
>
> Few nits:
>
> > ---
> > include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
> > 1 file changed, 25 insertions(+), 19 deletions(-)
> >
> > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> > index 678f90080fa6..23bde4bd5a85 100644
> > --- a/include/linux/mmap_lock.h
> > +++ b/include/linux/mmap_lock.h
> > @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
> > vma_refcount_put(vma);
> > }
> >
> > -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
> > -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
> > +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
> > {
> > + const struct mm_struct *mm = vma->vm_mm;
> > +
> > + /* We must hold an exclusive write lock for this access to be valid. */
> > mmap_assert_write_locked(vma->vm_mm);
> > + return mm->mm_lock_seq.sequence;
> > +}
> >
> > +/*
> > + * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
> > + * write lock is held.
> > + *
> > + * Returns true if write-locked, otherwise false.
> > + *
> > + * Note that mm_lock_seq is updated only if the VMA is NOT write-locked.
>
> This line is no longer applicable.
It's there for nostalgia's sake! :P
OK maybe not...
>
> > + */
> > +static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
> > +{
> > /*
> > * current task is holding mmap_write_lock, both vma->vm_lock_seq and
> > * mm->mm_lock_seq can't be concurrently modified.
> > */
> > - *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
> > - return (vma->vm_lock_seq == *mm_lock_seq);
> > + return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
> > }
> >
> > int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> > @@ -281,12 +294,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> > */
> > static inline void vma_start_write(struct vm_area_struct *vma)
> > {
> > - unsigned int mm_lock_seq;
> > -
> > - if (__is_vma_write_locked(vma, &mm_lock_seq))
> > + if (__is_vma_write_locked(vma))
> > return;
> >
> > - __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
> > + __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_UNINTERRUPTIBLE);
>
> At this point I think __vma_start_write() could just perform
> __vma_raw_mm_seqnum() itself and we can remove the param.
> It could possibly make the inline code smaller.
>
Good idea!
Will send fix-patch for both.
Thanks, Lorenzo
On Mon, Jan 26, 2026 at 8:29 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> On Mon, Jan 26, 2026 at 12:30:04PM +0100, Vlastimil Babka wrote:
> > On 1/23/26 21:12, Lorenzo Stoakes wrote:
> > > We don't actually need to return an output parameter providing mm sequence
> > > number, rather we can separate that out into another function -
> > > __vma_raw_mm_seqnum() - and have any callers which need to obtain that
> > > invoke that instead.
> > >
> > > The access to the raw sequence number requires that we hold the exclusive
> > > mmap lock such that we know we can't race vma_end_write_all(), so move the
> > > assert to __vma_raw_mm_seqnum() to make this requirement clear.
> > >
> > > Also while we're here, convert all of the VM_BUG_ON_VMA()'s to
> > > VM_WARN_ON_ONCE_VMA()'s in line with the convention that we do not invoke
> > > oopses when we can avoid it.
> > >
> > > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> >
> > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Sorry but I have one more comment below.
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
>
> Thanks!
>
> >
> > Few nits:
> >
> > > ---
> > > include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
> > > 1 file changed, 25 insertions(+), 19 deletions(-)
> > >
> > > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> > > index 678f90080fa6..23bde4bd5a85 100644
> > > --- a/include/linux/mmap_lock.h
> > > +++ b/include/linux/mmap_lock.h
> > > @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
> > > vma_refcount_put(vma);
> > > }
> > >
> > > -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
> > > -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
> > > +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
This function returns the mm->mm_lock_seq.sequence attribute of mm, so
no real connection to VMA. IMO it's better to rename it into
__raw_mm_lock_seqnum(const struct mm_struct *mm) and have the callers
pass vma->vm_mm.
> > > {
> > > + const struct mm_struct *mm = vma->vm_mm;
> > > +
> > > + /* We must hold an exclusive write lock for this access to be valid. */
> > > mmap_assert_write_locked(vma->vm_mm);
If for some reason you need to keep this function VMA-centric, then in
the above line please s/vma->vm_mm/mm
> > > + return mm->mm_lock_seq.sequence;
> > > +}
> > >
> > > +/*
> > > + * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
> > > + * write lock is held.
> > > + *
> > > + * Returns true if write-locked, otherwise false.
> > > + *
> > > + * Note that mm_lock_seq is updated only if the VMA is NOT write-locked.
> >
> > This line is no longer applicable.
>
> It's there for nostalgia's sake! :P
>
> OK maybe not...
>
> >
> > > + */
> > > +static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
> > > +{
> > > /*
> > > * current task is holding mmap_write_lock, both vma->vm_lock_seq and
> > > * mm->mm_lock_seq can't be concurrently modified.
> > > */
> > > - *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
> > > - return (vma->vm_lock_seq == *mm_lock_seq);
> > > + return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
> > > }
> > >
> > > int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> > > @@ -281,12 +294,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
> > > */
> > > static inline void vma_start_write(struct vm_area_struct *vma)
> > > {
> > > - unsigned int mm_lock_seq;
> > > -
> > > - if (__is_vma_write_locked(vma, &mm_lock_seq))
> > > + if (__is_vma_write_locked(vma))
> > > return;
> > >
> > > - __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
> > > + __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_UNINTERRUPTIBLE);
> >
> > At this point I think __vma_start_write() could just perform
> > __vma_raw_mm_seqnum() itself and we can remove the param.
> > It could possibly make the inline code smaller.
> >
>
> Good idea!
>
> Will send fix-patch for both.
>
> Thanks, Lorenzo
On Mon, Jan 26, 2026 at 11:21:29AM -0800, Suren Baghdasaryan wrote:
> On Mon, Jan 26, 2026 at 8:29 AM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> >
> > On Mon, Jan 26, 2026 at 12:30:04PM +0100, Vlastimil Babka wrote:
> > > On 1/23/26 21:12, Lorenzo Stoakes wrote:
> > > > We don't actually need to return an output parameter providing mm sequence
> > > > number, rather we can separate that out into another function -
> > > > __vma_raw_mm_seqnum() - and have any callers which need to obtain that
> > > > invoke that instead.
> > > >
> > > > The access to the raw sequence number requires that we hold the exclusive
> > > > mmap lock such that we know we can't race vma_end_write_all(), so move the
> > > > assert to __vma_raw_mm_seqnum() to make this requirement clear.
> > > >
> > > > Also while we're here, convert all of the VM_BUG_ON_VMA()'s to
> > > > VM_WARN_ON_ONCE_VMA()'s in line with the convention that we do not invoke
> > > > oopses when we can avoid it.
> > > >
> > > > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> > >
> > > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> Sorry but I have one more comment below.
>
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Thanks!
>
> >
> > Thanks!
> >
> > >
> > > Few nits:
> > >
> > > > ---
> > > > include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
> > > > 1 file changed, 25 insertions(+), 19 deletions(-)
> > > >
> > > > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> > > > index 678f90080fa6..23bde4bd5a85 100644
> > > > --- a/include/linux/mmap_lock.h
> > > > +++ b/include/linux/mmap_lock.h
> > > > @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
> > > > vma_refcount_put(vma);
> > > > }
> > > >
> > > > -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
> > > > -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
> > > > +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
>
> This function returns the mm->mm_lock_seq.sequence attribute of mm, so
> no real connection to VMA. IMO it's better to rename it into
> __raw_mm_lock_seqnum(const struct mm_struct *mm) and have the callers
> pass vma->vm_mm.
Sorry missed these comments.
We are only ever referencing in terms of VMA's, so I think it makes sense still
to pass a VMA ptr, even if it just references vma->vm_mm.
I think the name of the function also makes things pretty clear, given it's
called essentially 'VMA['s] raw mm seqnum'.
>
> > > > {
> > > > + const struct mm_struct *mm = vma->vm_mm;
> > > > +
> > > > + /* We must hold an exclusive write lock for this access to be valid. */
> > > > mmap_assert_write_locked(vma->vm_mm);
>
> If for some reason you need to keep this function VMA-centric, then in
> the above line please s/vma->vm_mm/mm
Ah yeah, maybe if there's a respin? As kinda trivial thing, if that makes sense
to you!
Cheers, Lorenzo
On 1/28/26 12:51, Lorenzo Stoakes wrote:
>>
>> >
>> > Thanks!
>> >
>> > >
>> > > Few nits:
>> > >
>> > > > ---
>> > > > include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
>> > > > 1 file changed, 25 insertions(+), 19 deletions(-)
>> > > >
>> > > > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
>> > > > index 678f90080fa6..23bde4bd5a85 100644
>> > > > --- a/include/linux/mmap_lock.h
>> > > > +++ b/include/linux/mmap_lock.h
>> > > > @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
>> > > > vma_refcount_put(vma);
>> > > > }
>> > > >
>> > > > -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
>> > > > -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
>> > > > +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
>>
>> This function returns the mm->mm_lock_seq.sequence attribute of mm, so
>> no real connection to VMA. IMO it's better to rename it into
>> __raw_mm_lock_seqnum(const struct mm_struct *mm) and have the callers
>> pass vma->vm_mm.
>
> Sorry missed these comments.
>
> We are only ever referencing in terms of VMA's, so I think it makes sense still
> to pass a VMA ptr, even if it just references vma->vm_mm.
FWIW I agree.
> I think the name of the function also makes things pretty clear, given it's
> called essentially 'VMA['s] raw mm seqnum'.
>
>>
>> > > > {
>> > > > + const struct mm_struct *mm = vma->vm_mm;
>> > > > +
>> > > > + /* We must hold an exclusive write lock for this access to be valid. */
>> > > > mmap_assert_write_locked(vma->vm_mm);
>>
>> If for some reason you need to keep this function VMA-centric, then in
>> the above line please s/vma->vm_mm/mm
>
> Ah yeah, maybe if there's a respin? As kinda trivial thing, if that makes sense
> to you!
>
> Cheers, Lorenzo
On Wed, Jan 28, 2026 at 5:01 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 1/28/26 12:51, Lorenzo Stoakes wrote:
> >>
> >> >
> >> > Thanks!
> >> >
> >> > >
> >> > > Few nits:
> >> > >
> >> > > > ---
> >> > > > include/linux/mmap_lock.h | 44 ++++++++++++++++++++++-----------------
> >> > > > 1 file changed, 25 insertions(+), 19 deletions(-)
> >> > > >
> >> > > > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> >> > > > index 678f90080fa6..23bde4bd5a85 100644
> >> > > > --- a/include/linux/mmap_lock.h
> >> > > > +++ b/include/linux/mmap_lock.h
> >> > > > @@ -258,17 +258,30 @@ static inline void vma_end_read(struct vm_area_struct *vma)
> >> > > > vma_refcount_put(vma);
> >> > > > }
> >> > > >
> >> > > > -/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
> >> > > > -static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
> >> > > > +static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
> >>
> >> This function returns the mm->mm_lock_seq.sequence attribute of mm, so
> >> no real commection to VMA. IMO it's better to rename it into
> >> __raw_mm_lock_seqnum(const struct mm_struct *mm) and have the callers
> >> pass vma->vm_mm.
> >
> > Sorry missed these comments.
> >
> > We are only ever referencing in terms of VMA's, so I think it makes sense still
> > to pass a VMA ptr, even if it just references vma->vm_mm.
>
> FWIW I agree.
>
> > I think the name of the function also makes things pretty clear, given it's
> > called essentially 'VMA['s] raw mm seqnum'.
> >
> >>
> >> > > > {
> >> > > > + const struct mm_struct *mm = vma->vm_mm;
> >> > > > +
> >> > > > + /* We must hold an exclusive write lock for this access to be valid. */
> >> > > > mmap_assert_write_locked(vma->vm_mm);
> >>
> >> If for some reason you need to keep this function VMA-centric, then in
> >> the above line please s/vma->vm_mm/mm
> >
> > Ah yeah, maybe if there's a respin? As kinda trivial thing, if that makes sense
> > to you!
Ack.
> >
> > Cheers, Lorenzo
>
© 2016 - 2026 Red Hat, Inc.