Utilize per-vma locks to stabilize the vma after lookup without taking
mmap_lock during PROCMAP_QUERY ioctl execution. If the vma lock is
contended, we fall back to mmap_lock but take it only momentarily
to lock the vma and release the mmap_lock. In the very unlikely case
of vm_refcnt overflow, this fallback path will fail and the ioctl is
done under mmap_lock protection.

This change is designed to reduce mmap_lock contention and prevent
PROCMAP_QUERY ioctl calls from blocking address space updates.
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
fs/proc/task_mmu.c | 81 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 65 insertions(+), 16 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 843577aa7a32..1d06ecdbef6f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -517,28 +517,78 @@ static int pid_maps_open(struct inode *inode, struct file *file)
 	PROCMAP_QUERY_VMA_FLAGS				\
 )
 
-static int query_vma_setup(struct mm_struct *mm)
+#ifdef CONFIG_PER_VMA_LOCK
+
+static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
 {
-	return mmap_read_lock_killable(mm);
+	lock_ctx->locked_vma = NULL;
+	lock_ctx->mmap_locked = false;
+
+	return 0;
 }
 
-static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
+static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
 {
-	mmap_read_unlock(mm);
+	if (lock_ctx->mmap_locked)
+		mmap_read_unlock(lock_ctx->mm);
+	else
+		unlock_vma(lock_ctx);
 }
 
-static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
+						      unsigned long addr)
 {
-	return find_vma(mm, addr);
+	struct vm_area_struct *vma;
+	struct vma_iterator vmi;
+
+	unlock_vma(lock_ctx);
+	rcu_read_lock();
+	vma_iter_init(&vmi, lock_ctx->mm, addr);
+	vma = lock_next_vma(lock_ctx->mm, &vmi, addr);
+	rcu_read_unlock();
+
+	if (!IS_ERR_OR_NULL(vma)) {
+		lock_ctx->locked_vma = vma;
+	} else if (PTR_ERR(vma) == -EAGAIN) {
+		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
+		mmap_read_lock(lock_ctx->mm);
+		vma = find_vma(lock_ctx->mm, addr);
+		lock_ctx->mmap_locked = true;
+	}
+
+	return vma;
 }
 
-static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
+#else /* CONFIG_PER_VMA_LOCK */
+
+static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
+{
+	return mmap_read_lock_killable(lock_ctx->mm);
+}
+
+static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
+{
+	mmap_read_unlock(lock_ctx->mm);
+}
+
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
+						      unsigned long addr)
+{
+	return find_vma(lock_ctx->mm, addr);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
+static struct vm_area_struct *query_matching_vma(struct proc_maps_locking_ctx *lock_ctx,
 						 unsigned long addr, u32 flags)
 {
 	struct vm_area_struct *vma;
 
 next_vma:
-	vma = query_vma_find_by_addr(mm, addr);
+	vma = query_vma_find_by_addr(lock_ctx, addr);
+	if (IS_ERR(vma))
+		return vma;
+
 	if (!vma)
 		goto no_vma;
 
@@ -579,11 +629,11 @@ static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
 	return ERR_PTR(-ENOENT);
 }
 
-static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
+static int do_procmap_query(struct mm_struct *mm, void __user *uarg)
 {
+	struct proc_maps_locking_ctx lock_ctx = { .mm = mm };
 	struct procmap_query karg;
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
 	const char *name = NULL;
 	char build_id_buf[BUILD_ID_SIZE_MAX], *name_buf = NULL;
 	__u64 usize;
@@ -610,17 +660,16 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	if (!!karg.build_id_size != !!karg.build_id_addr)
 		return -EINVAL;
 
-	mm = priv->lock_ctx.mm;
 	if (!mm || !mmget_not_zero(mm))
 		return -ESRCH;
 
-	err = query_vma_setup(mm);
+	err = query_vma_setup(&lock_ctx);
 	if (err) {
 		mmput(mm);
 		return err;
 	}
 
-	vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
+	vma = query_matching_vma(&lock_ctx, karg.query_addr, karg.query_flags);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		vma = NULL;
@@ -705,7 +754,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	}
 
 	/* unlock vma or mmap_lock, and put mm_struct before copying data to user */
-	query_vma_teardown(mm, vma);
+	query_vma_teardown(&lock_ctx);
 	mmput(mm);
 
 	if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
@@ -725,7 +774,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	return 0;
 
 out:
-	query_vma_teardown(mm, vma);
+	query_vma_teardown(&lock_ctx);
 	mmput(mm);
 	kfree(name_buf);
 	return err;
@@ -738,7 +787,7 @@ static long procfs_procmap_ioctl(struct file *file, unsigned int cmd, unsigned l
 
 	switch (cmd) {
 	case PROCMAP_QUERY:
-		return do_procmap_query(priv, (void __user *)arg);
+		return do_procmap_query(priv->lock_ctx.mm, (void __user *)arg);
 	default:
 		return -ENOIOCTLCMD;
 	}
--
2.50.1.565.gc32cd1483b-goog
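
For context, the ioctl whose locking is being relaxed here can be exercised
from userspace roughly as in the sketch below. This is not part of the patch;
it assumes the struct procmap_query uapi exported via <linux/fs.h> and simply
queries the VMA covering an address in the caller's own address space:

	#include <linux/fs.h>		/* struct procmap_query, PROCMAP_QUERY */
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct procmap_query q;
		char vma_name[256];
		int fd = open("/proc/self/maps", O_RDONLY);

		if (fd < 0)
			return 1;

		memset(&q, 0, sizeof(q));
		q.size = sizeof(q);
		/* accept the next VMA if the queried address is unmapped */
		q.query_flags = PROCMAP_QUERY_COVERING_OR_NEXT_VMA;
		q.query_addr = (unsigned long)&q;	/* any address of interest */
		q.vma_name_addr = (unsigned long)vma_name;
		q.vma_name_size = sizeof(vma_name);

		if (ioctl(fd, PROCMAP_QUERY, &q) == 0)
			printf("vma: %llx-%llx name: %s\n",
			       (unsigned long long)q.vma_start,
			       (unsigned long long)q.vma_end,
			       q.vma_name_size ? vma_name : "<anon>");

		close(fd);
		return 0;
	}

Each such call previously held mmap_read_lock for the duration of the query;
with this change the common case should only need the per-vma lock of the vma
being reported.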
On 8/5/25 1:15 AM, Suren Baghdasaryan wrote:
> Utilize per-vma locks to stabilize the vma after lookup without taking
> mmap_lock during PROCMAP_QUERY ioctl execution. If the vma lock is
> contended, we fall back to mmap_lock but take it only momentarily
> to lock the vma and release the mmap_lock. In the very unlikely case
> of vm_refcnt overflow, this fallback path will fail and the ioctl is
> done under mmap_lock protection.
>
> This change is designed to reduce mmap_lock contention and prevent
> PROCMAP_QUERY ioctl calls from blocking address space updates.
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

[...]

> -static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
> +static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
> +						      unsigned long addr)
>  {
> -	return find_vma(mm, addr);
> +	struct vm_area_struct *vma;
> +	struct vma_iterator vmi;

Hm I think we can reach here with lock_ctx->mmap_locked being true via
"goto next_vma" in query_matching_vma(). In that case we should just
"return find_vma()" and doing the below is wrong, no?

> +	unlock_vma(lock_ctx);
> +	rcu_read_lock();
> +	vma_iter_init(&vmi, lock_ctx->mm, addr);
> +	vma = lock_next_vma(lock_ctx->mm, &vmi, addr);
> +	rcu_read_unlock();
> +
> +	if (!IS_ERR_OR_NULL(vma)) {
> +		lock_ctx->locked_vma = vma;
> +	} else if (PTR_ERR(vma) == -EAGAIN) {
> +		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
> +		mmap_read_lock(lock_ctx->mm);
> +		vma = find_vma(lock_ctx->mm, addr);
> +		lock_ctx->mmap_locked = true;
> +	}
> +
> +	return vma;
>  }
On Tue, Aug 5, 2025 at 7:18 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 8/5/25 1:15 AM, Suren Baghdasaryan wrote:
[...]
> > -static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
> > +static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
> > +						      unsigned long addr)
> >  {
> > -	return find_vma(mm, addr);
> > +	struct vm_area_struct *vma;
> > +	struct vma_iterator vmi;
>
> Hm I think we can reach here with lock_ctx->mmap_locked being true via
> "goto next_vma" in query_matching_vma(). In that case we should just
> "return find_vma()" and doing the below is wrong, no?

Ah, you are quite right. I should handle mmap_locked differently in
query_vma_find_by_addr(). I will post the fix shortly.

> > +	unlock_vma(lock_ctx);
> > +	rcu_read_lock();
> > +	vma_iter_init(&vmi, lock_ctx->mm, addr);
> > +	vma = lock_next_vma(lock_ctx->mm, &vmi, addr);
> > +	rcu_read_unlock();
> > +
> > +	if (!IS_ERR_OR_NULL(vma)) {
> > +		lock_ctx->locked_vma = vma;
> > +	} else if (PTR_ERR(vma) == -EAGAIN) {
> > +		/* Fallback to mmap_lock on vma->vm_refcnt overflow */
> > +		mmap_read_lock(lock_ctx->mm);
> > +		vma = find_vma(lock_ctx->mm, addr);
> > +		lock_ctx->mmap_locked = true;
> > +	}
> > +
> > +	return vma;
> >  }
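
The direction agreed on above would presumably look something like the sketch
below: return early via find_vma() when the mmap_lock fallback is already
held, so lock_next_vma() is only attempted on the per-vma-lock path. This is
an illustration of the suggestion, not the actual follow-up patch:

	/* Rough sketch of the suggested fix, not the posted follow-up. */
	static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
							      unsigned long addr)
	{
		struct vm_area_struct *vma;
		struct vma_iterator vmi;

		if (lock_ctx->mmap_locked) {
			/*
			 * Already under mmap_lock after the -EAGAIN fallback
			 * (reached again via "goto next_vma"), so a plain
			 * lookup is enough and lock_next_vma() must be skipped.
			 */
			return find_vma(lock_ctx->mm, addr);
		}

		unlock_vma(lock_ctx);
		rcu_read_lock();
		vma_iter_init(&vmi, lock_ctx->mm, addr);
		vma = lock_next_vma(lock_ctx->mm, &vmi, addr);
		rcu_read_unlock();

		if (!IS_ERR_OR_NULL(vma)) {
			lock_ctx->locked_vma = vma;
		} else if (PTR_ERR(vma) == -EAGAIN) {
			/* Fall back to mmap_lock on vma->vm_refcnt overflow */
			mmap_read_lock(lock_ctx->mm);
			vma = find_vma(lock_ctx->mm, addr);
			lock_ctx->mmap_locked = true;
		}

		return vma;
	}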