We can use lockdep to avoid unnecessary work here, otherwise update the
code to logically evaluate all pertinent cases and share code with
vma_assert_write_locked().
Make it clear here that we treat the VMA being detached at this point as a
bug, this was only implicit before.
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/mmap_lock.h | 42 ++++++++++++++++++++++++++++++++++++---
1 file changed, 39 insertions(+), 3 deletions(-)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index b00d34b5ad10..92ea07f0da4e 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -319,6 +319,10 @@ int vma_start_write_killable(struct vm_area_struct *vma)
return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
}
+/**
+ * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
unsigned int mm_lock_seq;
@@ -326,16 +330,48 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
+/**
+ * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
+ * lock and is not detached.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- unsigned int mm_lock_seq;
+ unsigned int refs;
/*
* See the comment describing the vm_area_struct->vm_refcnt field for
* details of possible refcnt values.
*/
- VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
- !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+
+ /*
+ * If read-locked or currently excluding readers, then the VMA is
+ * locked.
+ */
+#ifdef CONFIG_LOCKDEP
+ if (lock_is_held(&vma->vmlock_dep_map))
+ return;
+#endif
+
+ refs = refcount_read(&vma->vm_refcnt);
+
+ /*
+ * In this case we're either read-locked, write-locked with temporary
+ * readers, or in the midst of excluding readers, all of which means
+ * we're locked.
+ */
+ if (refs > 1)
+ return;
+
+ /* It is a bug for the VMA to be detached here. */
+ VM_BUG_ON_VMA(!refs, vma);
+
+ /*
+ * OK, the VMA has a reference count of 1 which means it is either
+ * unlocked and attached or write-locked, so assert that it is
+ * write-locked.
+ */
+ vma_assert_write_locked(vma);
}
static inline bool vma_is_attached(struct vm_area_struct *vma)
--
2.52.0
On 1/22/26 14:02, Lorenzo Stoakes wrote:
> +/**
> + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> + * lock and is not detached.
> + * @vma: The VMA to assert.
> + */
> static inline void vma_assert_locked(struct vm_area_struct *vma)
> {
> - unsigned int mm_lock_seq;
> + unsigned int refs;
>
> /*
> * See the comment describing the vm_area_struct->vm_refcnt field for
> * details of possible refcnt values.
> */
> - VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
> - !__is_vma_write_locked(vma, &mm_lock_seq), vma);
> +
> + /*
> + * If read-locked or currently excluding readers, then the VMA is
> + * locked.
> + */
> +#ifdef CONFIG_LOCKDEP
> + if (lock_is_held(&vma->vmlock_dep_map))
> + return;
> +#endif
> +
> + refs = refcount_read(&vma->vm_refcnt);
> +
> + /*
> + * In this case we're either read-locked, write-locked with temporary
> + * readers, or in the midst of excluding readers, all of which means
> + * we're locked.
> + */
> + if (refs > 1)
> + return;
> +
> + /* It is a bug for the VMA to be detached here. */
> + VM_BUG_ON_VMA(!refs, vma);
> +
Yeah, previously this function was all VM_BUG_ON(), but since that's now
frowned upon, can we not do it anymore?
Seems we do have VM_WARN_ON_ONCE_VMA().
> + /*
> + * OK, the VMA has a reference count of 1 which means it is either
> + * unlocked and attached or write-locked, so assert that it is
> + * write-locked.
> + */
> + vma_assert_write_locked(vma);
> }
>
> static inline bool vma_is_attached(struct vm_area_struct *vma)
> --
> 2.52.0
On Fri, Jan 23, 2026 at 05:55:58PM +0100, Vlastimil Babka wrote:
> On 1/22/26 14:02, Lorenzo Stoakes wrote:
> > +/**
> > + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> > + * lock and is not detached.
> > + * @vma: The VMA to assert.
> > + */
> > static inline void vma_assert_locked(struct vm_area_struct *vma)
> > {
> > - unsigned int mm_lock_seq;
> > + unsigned int refs;
> >
> > /*
> > * See the comment describing the vm_area_struct->vm_refcnt field for
> > * details of possible refcnt values.
> > */
> > - VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
> > - !__is_vma_write_locked(vma, &mm_lock_seq), vma);
> > +
> > + /*
> > + * If read-locked or currently excluding readers, then the VMA is
> > + * locked.
> > + */
> > +#ifdef CONFIG_LOCKDEP
> > + if (lock_is_held(&vma->vmlock_dep_map))
> > + return;
> > +#endif
> > +
> > + refs = refcount_read(&vma->vm_refcnt);
> > +
> > + /*
> > + * In this case we're either read-locked, write-locked with temporary
> > + * readers, or in the midst of excluding readers, all of which means
> > + * we're locked.
> > + */
> > + if (refs > 1)
> > + return;
> > +
> > + /* It is a bug for the VMA to be detached here. */
> > + VM_BUG_ON_VMA(!refs, vma);
> > +
>
> Yeah, previously this function was all VM_BUG_ON(), but since that's now
> frowned upon, can we not do it anymore?
> Seems we do have VM_WARN_ON_ONCE_VMA().
Ack yeah will replace! Already replaced some in previous patch also :)
>
> > + /*
> > + * OK, the VMA has a reference count of 1 which means it is either
> > + * unlocked and attached or write-locked, so assert that it is
> > + * write-locked.
> > + */
> > + vma_assert_write_locked(vma);
> > }
> >
> > static inline bool vma_is_attached(struct vm_area_struct *vma)
> > --
> > 2.52.0
>
On Thu, Jan 22, 2026 at 5:02 AM Lorenzo Stoakes
<lorenzo.stoakes@oracle.com> wrote:
>
> We can use lockdep to avoid unnecessary work here, otherwise update the
> code to logically evaluate all pertinent cases and share code with
> vma_assert_write_locked().
>
> Make it clear here that we treat the VMA being detached at this point as a
> bug, this was only implicit before.
>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Looks correct.
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> ---
> include/linux/mmap_lock.h | 42 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 39 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index b00d34b5ad10..92ea07f0da4e 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -319,6 +319,10 @@ int vma_start_write_killable(struct vm_area_struct *vma)
> return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
> }
>
> +/**
> + * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
> + * @vma: The VMA to assert.
> + */
> static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> {
> unsigned int mm_lock_seq;
> @@ -326,16 +330,48 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
> }
>
> +/**
> + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> + * lock and is not detached.
> + * @vma: The VMA to assert.
> + */
> static inline void vma_assert_locked(struct vm_area_struct *vma)
> {
> - unsigned int mm_lock_seq;
> + unsigned int refs;
>
> /*
> * See the comment describing the vm_area_struct->vm_refcnt field for
> * details of possible refcnt values.
> */
> - VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
> - !__is_vma_write_locked(vma, &mm_lock_seq), vma);
> +
> + /*
> + * If read-locked or currently excluding readers, then the VMA is
> + * locked.
> + */
> +#ifdef CONFIG_LOCKDEP
> + if (lock_is_held(&vma->vmlock_dep_map))
> + return;
> +#endif
> +
> + refs = refcount_read(&vma->vm_refcnt);
> +
> + /*
> + * In this case we're either read-locked, write-locked with temporary
> + * readers, or in the midst of excluding readers, all of which means
> + * we're locked.
> + */
> + if (refs > 1)
> + return;
> +
> + /* It is a bug for the VMA to be detached here. */
> + VM_BUG_ON_VMA(!refs, vma);
> +
> + /*
> + * OK, the VMA has a reference count of 1 which means it is either
> + * unlocked and attached or write-locked, so assert that it is
> + * write-locked.
> + */
> + vma_assert_write_locked(vma);
> }
>
> static inline bool vma_is_attached(struct vm_area_struct *vma)
> --
> 2.52.0
On Thu, Jan 22, 2026 at 02:02:53PM -0800, Suren Baghdasaryan wrote:
> On Thu, Jan 22, 2026 at 5:02 AM Lorenzo Stoakes
> <lorenzo.stoakes@oracle.com> wrote:
> >
> > We can use lockdep to avoid unnecessary work here, otherwise update the
> > code to logically evaluate all pertinent cases and share code with
> > vma_assert_write_locked().
> >
> > Make it clear here that we treat the VMA being detached at this point as a
> > bug, this was only implicit before.
> >
> > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> Looks correct.
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>

Thanks!
© 2016 - 2026 Red Hat, Inc.