We can use lockdep to avoid unnecessary work here; otherwise, update the
code to logically evaluate all pertinent cases and share code with
vma_assert_write_locked().

Make it clear here that we treat the VMA being detached at this point as a
bug; this was only implicit before.
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/mmap_lock.h | 41 +++++++++++++++++++++++++++++++++++++--
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 23bde4bd5a85..4a0aafc66c5d 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -322,19 +322,56 @@ int vma_start_write_killable(struct vm_area_struct *vma)
return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
}
+/**
+ * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
}
+/**
+ * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
+ * lock and is not detached.
+ * @vma: The VMA to assert.
+ */
static inline void vma_assert_locked(struct vm_area_struct *vma)
{
+ unsigned int refcnt;
+
+ /*
+ * If read-locked or currently excluding readers, then the VMA is
+ * locked.
+ */
+#ifdef CONFIG_LOCKDEP
+ if (lock_is_held(&vma->vmlock_dep_map))
+ return;
+#endif
+
/*
* See the comment describing the vm_area_struct->vm_refcnt field for
* details of possible refcnt values.
*/
- VM_WARN_ON_ONCE_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
- !__is_vma_write_locked(vma), vma);
+ refcnt = refcount_read(&vma->vm_refcnt);
+
+ /*
+ * In this case we're either read-locked, write-locked with temporary
+ * readers, or in the midst of excluding readers, all of which means
+ * we're locked.
+ */
+ if (refcnt > 1)
+ return;
+
+ /* It is a bug for the VMA to be detached here. */
+ VM_WARN_ON_ONCE_VMA(!refcnt, vma);
+
+ /*
+ * OK, the VMA has a reference count of 1 which means it is either
+ * unlocked and attached or write-locked, so assert that it is
+ * write-locked.
+ */
+ vma_assert_write_locked(vma);
}
static inline bool vma_is_attached(struct vm_area_struct *vma)
--
2.52.0
Hi Andrew,
Could you apply the attached fix-patch, which implements Vlasta's suggestion
and avoids unnecessary ifdeffery?
Could you also adjust the original commit message to append the below?
Thanks!
-->
Additionally, abstract references to vma->vmlock_dep_map by introducing a
macro helper __vma_lockdep_map() which accesses this field if lockdep is
enabled.
Since lock_is_held() is specified as an extern function if lockdep is
disabled, we can simply have __vma_lockdep_map() defined as NULL in this
case, and then use IS_ENABLED(CONFIG_LOCKDEP) to avoid ugly ifdeffery.
<--
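As an aside, here is a minimal standalone sketch of that pattern with made-up
names -- DEMO_LOCKDEP, demo_lock_is_held() and friends are stand-ins, not
kernel APIs. It shows why no #ifdef is needed at the call site: the disabled
branch is still compiled and type-checked against the NULL map, but is
eliminated as dead code.

	#include <stdio.h>

	/* Stand-in for CONFIG_LOCKDEP; build with -DDEMO_LOCKDEP to "enable" it. */
	#ifdef DEMO_LOCKDEP
	#define DEMO_LOCKDEP_ENABLED 1
	#define demo_lockdep_map(obj) (&(obj)->dep_map)
	#else
	#define DEMO_LOCKDEP_ENABLED 0
	#define demo_lockdep_map(obj) NULL
	#endif

	struct demo_dep_map { int held; };
	struct demo_obj { struct demo_dep_map dep_map; };

	/*
	 * Defined here so the demo links either way. The kernel instead leaves
	 * lock_is_held() as a bare extern declaration when lockdep is disabled
	 * and relies on the compiler eliminating the dead call below.
	 */
	static int demo_lock_is_held(const struct demo_dep_map *map)
	{
		return map && map->held;
	}

	static void demo_assert_locked(struct demo_obj *obj)
	{
		if (DEMO_LOCKDEP_ENABLED) {
			/* Still compiled and type-checked, but dead when disabled. */
			if (!demo_lock_is_held(demo_lockdep_map(obj)))
				printf("demo: lock not held!\n");
			return;
		}

		printf("demo: fallback check without lockdep\n");
	}

	int main(void)
	{
		struct demo_obj obj = { .dep_map = { .held = 1 } };

		demo_assert_locked(&obj);
		return 0;
	}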
Cheers, Lorenzo
----8<----
From 9fb6dbe821c87759b269ddc926343d1f9c26db2a Mon Sep 17 00:00:00 2001
From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Date: Mon, 26 Jan 2026 16:40:34 +0000
Subject: [PATCH] fix
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
---
include/linux/mmap_lock.h | 29 ++++++++++++++++-------------
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 4a0aafc66c5d..160c572c1798 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -78,6 +78,12 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
#ifdef CONFIG_PER_VMA_LOCK
+#ifdef CONFIG_LOCKDEP
+#define __vma_lockdep_map(vma) (&vma->vmlock_dep_map)
+#else
+#define __vma_lockdep_map(vma) NULL
+#endif
+
/*
* VMA locks do not behave like most ordinary locks found in the kernel, so we
* cannot quite have full lockdep tracking in the way we would ideally prefer.
@@ -98,16 +104,16 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
* so we utilise lockdep to do so.
*/
#define __vma_lockdep_acquire_read(vma) \
- lock_acquire_shared(&vma->vmlock_dep_map, 0, 1, NULL, _RET_IP_)
+ lock_acquire_shared(__vma_lockdep_map(vma), 0, 1, NULL, _RET_IP_)
#define __vma_lockdep_release_read(vma) \
- lock_release(&vma->vmlock_dep_map, _RET_IP_)
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
#define __vma_lockdep_acquire_exclusive(vma) \
- lock_acquire_exclusive(&vma->vmlock_dep_map, 0, 0, NULL, _RET_IP_)
+ lock_acquire_exclusive(__vma_lockdep_map(vma), 0, 0, NULL, _RET_IP_)
#define __vma_lockdep_release_exclusive(vma) \
- lock_release(&vma->vmlock_dep_map, _RET_IP_)
+ lock_release(__vma_lockdep_map(vma), _RET_IP_)
/* Only meaningful if CONFIG_LOCK_STAT is defined. */
#define __vma_lockdep_stat_mark_acquired(vma) \
- lock_acquired(&vma->vmlock_dep_map, _RET_IP_)
+ lock_acquired(__vma_lockdep_map(vma), _RET_IP_)
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
@@ -146,7 +152,7 @@ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_key;
- lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+ lockdep_init_map(__vma_lockdep_map(vma), "vm_lock", &lockdep_key, 0);
#endif
if (reset_refcnt)
refcount_set(&vma->vm_refcnt, 0);
@@ -340,14 +346,11 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
{
unsigned int refcnt;
- /*
- * If read-locked or currently excluding readers, then the VMA is
- * locked.
- */
-#ifdef CONFIG_LOCKDEP
- if (lock_is_held(&vma->vmlock_dep_map))
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ if (!lock_is_held(__vma_lockdep_map(vma)))
+ vma_assert_write_locked(vma);
return;
-#endif
+ }
/*
* See the comment describing the vm_area_struct->vm_refcnt field for
--
2.52.0
On 1/23/26 21:12, Lorenzo Stoakes wrote:
> We can use lockdep to avoid unnecessary work here; otherwise, update the
> code to logically evaluate all pertinent cases and share code with
> vma_assert_write_locked().
>
> Make it clear here that we treat the VMA being detached at this point as a
> bug; this was only implicit before.
>
> Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Nit:
> ---
> include/linux/mmap_lock.h | 41 +++++++++++++++++++++++++++++++++++++--
> 1 file changed, 39 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 23bde4bd5a85..4a0aafc66c5d 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -322,19 +322,56 @@ int vma_start_write_killable(struct vm_area_struct *vma)
> return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
> }
>
> +/**
> + * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
> + * @vma: The VMA to assert.
> + */
> static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> {
> VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
> }
>
> +/**
> + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> + * lock and is not detached.
> + * @vma: The VMA to assert.
> + */
> static inline void vma_assert_locked(struct vm_area_struct *vma)
> {
> + unsigned int refcnt;
> +
> + /*
> + * If read-locked or currently excluding readers, then the VMA is
> + * locked.
> + */
> +#ifdef CONFIG_LOCKDEP
> + if (lock_is_held(&vma->vmlock_dep_map))
> + return;
Wouldn't this work a tiny bit better?
if (!lock_is_held(&vma->vmlock_dep_map))
vma_assert_write_locked(vma);
return;
> +#endif
> +
> /*
> * See the comment describing the vm_area_struct->vm_refcnt field for
> * details of possible refcnt values.
> */
> - VM_WARN_ON_ONCE_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
> - !__is_vma_write_locked(vma), vma);
> + refcnt = refcount_read(&vma->vm_refcnt);
> +
> + /*
> + * In this case we're either read-locked, write-locked with temporary
> + * readers, or in the midst of excluding readers, all of which means
> + * we're locked.
> + */
> + if (refcnt > 1)
> + return;
> +
> + /* It is a bug for the VMA to be detached here. */
> + VM_WARN_ON_ONCE_VMA(!refcnt, vma);
> +
> + /*
> + * OK, the VMA has a reference count of 1 which means it is either
> + * unlocked and attached or write-locked, so assert that it is
> + * write-locked.
> + */
> + vma_assert_write_locked(vma);
> }
>
> static inline bool vma_is_attached(struct vm_area_struct *vma)
On Mon, Jan 26, 2026 at 02:42:00PM +0100, Vlastimil Babka wrote:
> On 1/23/26 21:12, Lorenzo Stoakes wrote:
> > We can use lockdep to avoid unnecessary work here; otherwise, update the
> > code to logically evaluate all pertinent cases and share code with
> > vma_assert_write_locked().
> >
> > Make it clear here that we treat the VMA being detached at this point as a
> > bug; this was only implicit before.
> >
> > Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>
> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Thanks!
>
> Nit:
>
> > ---
> > include/linux/mmap_lock.h | 41 +++++++++++++++++++++++++++++++++++++--
> > 1 file changed, 39 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> > index 23bde4bd5a85..4a0aafc66c5d 100644
> > --- a/include/linux/mmap_lock.h
> > +++ b/include/linux/mmap_lock.h
> > @@ -322,19 +322,56 @@ int vma_start_write_killable(struct vm_area_struct *vma)
> > return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
> > }
> >
> > +/**
> > + * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
> > + * @vma: The VMA to assert.
> > + */
> > static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> > {
> > VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
> > }
> >
> > +/**
> > + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> > + * lock and is not detached.
> > + * @vma: The VMA to assert.
> > + */
> > static inline void vma_assert_locked(struct vm_area_struct *vma)
> > {
> > + unsigned int refcnt;
> > +
> > + /*
> > + * If read-locked or currently excluding readers, then the VMA is
> > + * locked.
> > + */
> > +#ifdef CONFIG_LOCKDEP
> > + if (lock_is_held(&vma->vmlock_dep_map))
> > + return;
>
> Wouldn't this work a tiny bit better?
>
> if (!lock_is_held(&vma->vmlock_dep_map))
> vma_assert_write_locked(vma);
> return;
Hm yeah, could do. I guess we don't need to mix the 'uncertain' stuff below
with the lockdep certainty; at this point we _know_ there is no read lock and
no readers-being-excluded exclusive lock, so it can only be a write lock.

Will test locally to make sure it's sane, then send a fix-patch. :)
>
> > +#endif
> > +
> > /*
> > * See the comment describing the vm_area_struct->vm_refcnt field for
> > * details of possible refcnt values.
> > */
> > - VM_WARN_ON_ONCE_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
> > - !__is_vma_write_locked(vma), vma);
> > + refcnt = refcount_read(&vma->vm_refcnt);
> > +
> > + /*
> > + * In this case we're either read-locked, write-locked with temporary
> > + * readers, or in the midst of excluding readers, all of which means
> > + * we're locked.
> > + */
> > + if (refcnt > 1)
> > + return;
> > +
> > + /* It is a bug for the VMA to be detached here. */
> > + VM_WARN_ON_ONCE_VMA(!refcnt, vma);
> > +
> > + /*
> > + * OK, the VMA has a reference count of 1 which means it is either
> > + * unlocked and attached or write-locked, so assert that it is
> > + * write-locked.
> > + */
> > + vma_assert_write_locked(vma);
> > }
> >
> > static inline bool vma_is_attached(struct vm_area_struct *vma)
>
Cheers, Lorenzo
On Mon, Jan 26, 2026 at 04:44:07PM +0000, Lorenzo Stoakes wrote:
> On Mon, Jan 26, 2026 at 02:42:00PM +0100, Vlastimil Babka wrote:
> > On 1/23/26 21:12, Lorenzo Stoakes wrote:
> > > We can use lockdep to avoid unnecessary work here; otherwise, update the
> > > code to logically evaluate all pertinent cases and share code with
> > > vma_assert_write_locked().
> > >
> > > Make it clear here that we treat the VMA being detached at this point as a
> > > bug; this was only implicit before.
> > >
> > > Reviewed-by: Suren Baghdasaryan <surenb@google.com>
> > > Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> >
> > Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> Thanks!
>
> >
> > Nit:
> >
> > > ---
> > > include/linux/mmap_lock.h | 41 +++++++++++++++++++++++++++++++++++++--
> > > 1 file changed, 39 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> > > index 23bde4bd5a85..4a0aafc66c5d 100644
> > > --- a/include/linux/mmap_lock.h
> > > +++ b/include/linux/mmap_lock.h
> > > @@ -322,19 +322,56 @@ int vma_start_write_killable(struct vm_area_struct *vma)
> > > return __vma_start_write(vma, __vma_raw_mm_seqnum(vma), TASK_KILLABLE);
> > > }
> > >
> > > +/**
> > > + * vma_assert_write_locked() - assert that @vma holds a VMA write lock.
> > > + * @vma: The VMA to assert.
> > > + */
> > > static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> > > {
> > > VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
> > > }
> > >
> > > +/**
> > > + * vma_assert_locked() - assert that @vma holds either a VMA read or a VMA write
> > > + * lock and is not detached.
> > > + * @vma: The VMA to assert.
> > > + */
> > > static inline void vma_assert_locked(struct vm_area_struct *vma)
> > > {
> > > + unsigned int refcnt;
> > > +
> > > + /*
> > > + * If read-locked or currently excluding readers, then the VMA is
> > > + * locked.
> > > + */
> > > +#ifdef CONFIG_LOCKDEP
> > > + if (lock_is_held(&vma->vmlock_dep_map))
> > > + return;
> >
> > Wouldn't this work a tiny bit better?
> >
> > if (!lock_is_held(&vma->vmlock_dep_map))
> > vma_assert_write_locked(vma);
> > return;
>
> Hm yeah, could do. I guess we don't need to mix the 'uncertain' stuff below
> with the lockdep certainty; at this point we _know_ there is no read lock and
> no readers-being-excluded exclusive lock, so it can only be a write lock.
>
> Will test locally to make sure it's sane, then send a fix-patch. :)
Actually I think I can get rid of this #ifdef here, will fold it into patch...
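For reference, this is what vma_assert_locked() ends up looking like with the
fix folded into the original patch -- reconstructed here from the two diffs
above, so treat it as a sketch of the intended end state rather than the
committed code:

	static inline void vma_assert_locked(struct vm_area_struct *vma)
	{
		unsigned int refcnt;

		if (IS_ENABLED(CONFIG_LOCKDEP)) {
			if (!lock_is_held(__vma_lockdep_map(vma)))
				vma_assert_write_locked(vma);
			return;
		}

		/*
		 * See the comment describing the vm_area_struct->vm_refcnt field for
		 * details of possible refcnt values.
		 */
		refcnt = refcount_read(&vma->vm_refcnt);

		/*
		 * In this case we're either read-locked, write-locked with temporary
		 * readers, or in the midst of excluding readers, all of which means
		 * we're locked.
		 */
		if (refcnt > 1)
			return;

		/* It is a bug for the VMA to be detached here. */
		VM_WARN_ON_ONCE_VMA(!refcnt, vma);

		/*
		 * OK, the VMA has a reference count of 1 which means it is either
		 * unlocked and attached or write-locked, so assert that it is
		 * write-locked.
		 */
		vma_assert_write_locked(vma);
	}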