Utilize per-vma locks to stabilize the vma after lookup without taking
mmap_lock during PROCMAP_QUERY ioctl execution. While we might still
take mmap_lock for reading under contention, we do so only momentarily,
to lock the vma.

This change is designed to reduce mmap_lock contention and prevent
PROCMAP_QUERY ioctl calls from blocking address space updates.
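
As a rough sketch, the lookup-with-fallback pattern works along the
lines of the function below (the real helpers, get_next_vma() and
unlock_vma(), are introduced earlier in this series; the function name
here is illustrative only):

	static struct vm_area_struct *lock_vma_sketch(struct proc_maps_private *priv,
						      unsigned long addr)
	{
		struct vm_area_struct *vma;

		/* Fast path: find and read-lock the vma under RCU alone. */
		vma = lock_vma_under_rcu(priv->mm, addr);
		if (vma) {
			priv->locked_vma = vma;
			return vma;
		}

		/*
		 * Contended: fall back to mmap_lock just long enough to do
		 * a stable lookup; teardown drops mmap_lock afterwards.
		 */
		if (mmap_read_lock_killable(priv->mm))
			return ERR_PTR(-EINTR);
		priv->mmap_locked = true;
		return find_vma(priv->mm, addr);
	}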
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
---
fs/proc/task_mmu.c | 60 ++++++++++++++++++++++++++++++++++++----------
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ff3fe488ce51..0496d5969a51 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -487,28 +487,64 @@ static int pid_maps_open(struct inode *inode, struct file *file)
PROCMAP_QUERY_VMA_FLAGS \
)
-static int query_vma_setup(struct mm_struct *mm)
+#ifdef CONFIG_PER_VMA_LOCK
+
+static int query_vma_setup(struct proc_maps_private *priv)
{
- return mmap_read_lock_killable(mm);
+ priv->locked_vma = NULL;
+ priv->mmap_locked = false;
+
+ return 0;
}
-static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
+static void query_vma_teardown(struct proc_maps_private *priv)
{
- mmap_read_unlock(mm);
+ unlock_vma(priv);
+}
+
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_private *priv,
+ unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ rcu_read_lock();
+ vma_iter_init(&priv->iter, priv->mm, addr);
+ vma = get_next_vma(priv, addr);
+ rcu_read_unlock();
+
+ return vma;
}
-static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
+#else /* CONFIG_PER_VMA_LOCK */
+
+static int query_vma_setup(struct proc_maps_private *priv)
{
- return find_vma(mm, addr);
+ return mmap_read_lock_killable(priv->mm);
}
-static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
+static void query_vma_teardown(struct proc_maps_private *priv)
+{
+ mmap_read_unlock(priv->mm);
+}
+
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_private *priv,
+ unsigned long addr)
+{
+ return find_vma(priv->mm, addr);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
+static struct vm_area_struct *query_matching_vma(struct proc_maps_private *priv,
unsigned long addr, u32 flags)
{
struct vm_area_struct *vma;
next_vma:
- vma = query_vma_find_by_addr(mm, addr);
+ vma = query_vma_find_by_addr(priv, addr);
+ if (IS_ERR(vma))
+ return vma;
+
if (!vma)
goto no_vma;
@@ -584,13 +620,13 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
if (!mm || !mmget_not_zero(mm))
return -ESRCH;
- err = query_vma_setup(mm);
+ err = query_vma_setup(priv);
if (err) {
mmput(mm);
return err;
}
- vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
+ vma = query_matching_vma(priv, karg.query_addr, karg.query_flags);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
vma = NULL;
@@ -675,7 +711,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
}
/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
- query_vma_teardown(mm, vma);
+ query_vma_teardown(priv);
mmput(mm);
if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
@@ -695,7 +731,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
return 0;
out:
- query_vma_teardown(mm, vma);
+ query_vma_teardown(priv);
mmput(mm);
kfree(name_buf);
return err;
--
2.50.0.727.gbf7dc18ff4-goog
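
For context, a minimal userspace sketch of the interface this patch
optimizes (struct procmap_query, PROCMAP_QUERY, and the flag below come
from the uapi header <linux/fs.h>; error handling is omitted):

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct procmap_query q;
		int fd = open("/proc/self/maps", O_RDONLY);

		memset(&q, 0, sizeof(q));
		q.size = sizeof(q);			/* required by the ABI */
		q.query_addr = (__u64)(unsigned long)&q; /* an address on our stack */
		/* match the vma covering query_addr, or the next one after it */
		q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;

		if (ioctl(fd, PROCMAP_QUERY, &q) == 0)
			printf("vma: %llx-%llx\n",
			       (unsigned long long)q.vma_start,
			       (unsigned long long)q.vma_end);
		close(fd);
		return 0;
	}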
* Suren Baghdasaryan <surenb@google.com> [250704 02:07]:
> Utilize per-vma locks to stabilize vma after lookup without taking
> mmap_lock during PROCMAP_QUERY ioctl execution. While we might take
> mmap_lock for reading during contention, we do that momentarily only
> to lock the vma.
> This change is designed to reduce mmap_lock contention and prevent
> PROCMAP_QUERY ioctl calls from blocking address space updates.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
On Thu, Jul 03, 2025 at 11:07:26PM -0700, Suren Baghdasaryan wrote:
> Utilize per-vma locks to stabilize vma after lookup without taking
> mmap_lock during PROCMAP_QUERY ioctl execution. While we might take
> mmap_lock for reading during contention, we do that momentarily only
> to lock the vma.
> This change is designed to reduce mmap_lock contention and prevent
> PROCMAP_QUERY ioctl calls from blocking address space updates.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>

This LGTM so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>