[PATCH] rust_binder: use lock_vma_under_rcu() in use_page_slow()

Alice Ryhl posted 1 patch 1 month, 1 week ago
drivers/android/binder/page_range.rs | 37 ++++++++++++++++++------------------
1 file changed, 19 insertions(+), 18 deletions(-)
[PATCH] rust_binder: use lock_vma_under_rcu() in use_page_slow()
Posted by Alice Ryhl 1 month, 1 week ago
There's no need to take the lock on the whole mm when we only perform
operations on a single vma, so to reduce contention, use the
lock_vma_under_rcu() abstraction.

Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
Depends on:
https://lore.kernel.org/all/20260218-binder-vma-check-v2-0-60f9d695a990@google.com/
---
 drivers/android/binder/page_range.rs | 37 ++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index 67aae783e8b8b7cf60ecf7e711d5f6f6f5d1dbe3..9dfc154e5dd4e889c4f3aa89e5edb89434113e1a 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -435,24 +435,25 @@ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
         //
         // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
         // workqueue.
-        check_vma(
-            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
-                .mmap_read_lock()
-                .vma_lookup(vma_addr)
-                .ok_or(ESRCH)?,
-            self,
-        )
-        .ok_or(ESRCH)?
-        .vm_insert_page(user_page_addr, &new_page)
-        .inspect_err(|err| {
-            pr_warn!(
-                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
-                user_page_addr,
-                vma_addr,
-                i,
-                err
-            )
-        })?;
+        let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
+        {
+            let vma_read;
+            let mmap_read;
+            let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
+                vma_read = ret;
+                check_vma(&vma_read, self)
+            } else {
+                mmap_read = mm.mmap_read_lock();
+                mmap_read
+                    .vma_lookup(vma_addr)
+                    .and_then(|vma| check_vma(vma, self))
+            };
+
+            match vma {
+                Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
+                None => return Err(ESRCH),
+            }
+        }
 
         let inner = self.lock.lock();
 

---
base-commit: 2961f841b025fb234860bac26dfb7fa7cb0fb122
change-id: 20260217-binder-vma-rcu-e699d5752ad3
prerequisite-change-id: 20260217-binder-vma-check-b6fca42e986c:v2
prerequisite-patch-id: 4ca86894150aa7ee26c04440100cb71ce599ce80
prerequisite-patch-id: beb72c0aa2ce3d495dd69817507ae6885e5ae1e6

Best regards,
-- 
Alice Ryhl <aliceryhl@google.com>
Re: [PATCH] rust_binder: use lock_vma_under_rcu() in use_page_slow()
Posted by Jann Horn 1 month, 1 week ago
On Wed, Feb 18, 2026 at 4:13 PM Alice Ryhl <aliceryhl@google.com> wrote:
> There's no reason to lock the whole mm when we are doing operations on
> the vma if we can help it, so to reduce contention, use the
> lock_vma_under_rcu() abstraction.
>
> Signed-off-by: Alice Ryhl <aliceryhl@google.com>

Reviewed-by: Jann Horn <jannh@google.com>

> ---
> Depends on:
> https://lore.kernel.org/all/20260218-binder-vma-check-v2-0-60f9d695a990@google.com/
> ---
>  drivers/android/binder/page_range.rs | 37 ++++++++++++++++++------------------
>  1 file changed, 19 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
> index 67aae783e8b8b7cf60ecf7e711d5f6f6f5d1dbe3..9dfc154e5dd4e889c4f3aa89e5edb89434113e1a 100644
> --- a/drivers/android/binder/page_range.rs
> +++ b/drivers/android/binder/page_range.rs
> @@ -435,24 +435,25 @@ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
>          //
>          // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
>          // workqueue.
> -        check_vma(
> -            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
> -                .mmap_read_lock()
> -                .vma_lookup(vma_addr)
> -                .ok_or(ESRCH)?,
> -            self,
> -        )
> -        .ok_or(ESRCH)?
> -        .vm_insert_page(user_page_addr, &new_page)
> -        .inspect_err(|err| {
> -            pr_warn!(
> -                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
> -                user_page_addr,
> -                vma_addr,
> -                i,
> -                err
> -            )
> -        })?;
> +        let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
> +        {
> +            let vma_read;
> +            let mmap_read;
> +            let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
> +                vma_read = ret;
> +                check_vma(&vma_read, self)

nit: this might look nicer if the check_vma() calls were moved out of
the two separate branches

> +            } else {
> +                mmap_read = mm.mmap_read_lock();
> +                mmap_read
> +                    .vma_lookup(vma_addr)
> +                    .and_then(|vma| check_vma(vma, self))
> +            };
> +
> +            match vma {
> +                Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
> +                None => return Err(ESRCH),
> +            }
> +        }
Re: [PATCH] rust_binder: use lock_vma_under_rcu() in use_page_slow()
Posted by Liam R. Howlett 1 month, 1 week ago
* Alice Ryhl <aliceryhl@google.com> [260218 10:13]:
> There's no reason to lock the whole mm when we are doing operations on
> the vma if we can help it, so to reduce contention, use the
> lock_vma_under_rcu() abstraction.
> 
> Signed-off-by: Alice Ryhl <aliceryhl@google.com>

FWIW for rust code..

Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>

> ---
> Depends on:
> https://lore.kernel.org/all/20260218-binder-vma-check-v2-0-60f9d695a990@google.com/
> ---
>  drivers/android/binder/page_range.rs | 37 ++++++++++++++++++------------------
>  1 file changed, 19 insertions(+), 18 deletions(-)
> 
> diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
> index 67aae783e8b8b7cf60ecf7e711d5f6f6f5d1dbe3..9dfc154e5dd4e889c4f3aa89e5edb89434113e1a 100644
> --- a/drivers/android/binder/page_range.rs
> +++ b/drivers/android/binder/page_range.rs
> @@ -435,24 +435,25 @@ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
>          //
>          // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
>          // workqueue.
> -        check_vma(
> -            MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
> -                .mmap_read_lock()
> -                .vma_lookup(vma_addr)
> -                .ok_or(ESRCH)?,
> -            self,
> -        )
> -        .ok_or(ESRCH)?
> -        .vm_insert_page(user_page_addr, &new_page)
> -        .inspect_err(|err| {
> -            pr_warn!(
> -                "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
> -                user_page_addr,
> -                vma_addr,
> -                i,
> -                err
> -            )
> -        })?;
> +        let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
> +        {
> +            let vma_read;
> +            let mmap_read;
> +            let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
> +                vma_read = ret;
> +                check_vma(&vma_read, self)
> +            } else {
> +                mmap_read = mm.mmap_read_lock();
> +                mmap_read
> +                    .vma_lookup(vma_addr)
> +                    .and_then(|vma| check_vma(vma, self))
> +            };
> +
> +            match vma {
> +                Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
> +                None => return Err(ESRCH),
> +            }
> +        }
>  
>          let inner = self.lock.lock();
>  
> 
> ---
> base-commit: 2961f841b025fb234860bac26dfb7fa7cb0fb122
> change-id: 20260217-binder-vma-rcu-e699d5752ad3
> prerequisite-change-id: 20260217-binder-vma-check-b6fca42e986c:v2
> prerequisite-patch-id: 4ca86894150aa7ee26c04440100cb71ce599ce80
> prerequisite-patch-id: beb72c0aa2ce3d495dd69817507ae6885e5ae1e6
> 
> Best regards,
> -- 
> Alice Ryhl <aliceryhl@google.com>
>