Use per-vma locking when installing pages concurrently. This minimizes
contention with unrelated vmas and improves performance. The mmap_lock
is still acquired when needed, e.g. before calling
get_user_pages_remote().

Many thanks to Barry Song, who posted a similar approach [1].
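
For reference, the general shape of the pattern is sketched below. This
is a minimal illustration only: the function name is made up and the
binder-specific alloc->mapped checks are omitted; see
binder_page_insert() in the diff for the actual code.

	/*
	 * Simplified sketch of the per-vma lock fast path with an
	 * mmap_lock fallback (assumes <linux/mm.h>); not the binder code.
	 */
	static int insert_page_per_vma(struct mm_struct *mm,
				       unsigned long addr,
				       struct page *page)
	{
		struct vm_area_struct *vma;
		int ret = -ESRCH;

		/* Fast path: take only the per-vma read lock. */
		vma = lock_vma_under_rcu(mm, addr);
		if (vma) {
			ret = vm_insert_page(vma, addr, page);
			vma_end_read(vma);
			return ret;
		}

		/*
		 * NULL means no vma at addr or the per-vma lock is
		 * contended: fall back to the mmap_lock.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (vma)
			ret = vm_insert_page(vma, addr, page);
		mmap_read_unlock(mm);

		return ret;
	}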
Link: https://lore.kernel.org/all/20240902225009.34576-1-21cnbao@gmail.com/ [1]
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
drivers/android/binder_alloc.c | 61 +++++++++++++++++++++++++---------
1 file changed, 45 insertions(+), 16 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3716ffd00baf..7d2cad9beebb 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -233,6 +233,48 @@ static inline bool binder_alloc_is_mapped(struct binder_alloc *alloc)
return smp_load_acquire(&alloc->mapped);
}
+static struct page *binder_page_lookup(struct binder_alloc *alloc,
+ unsigned long addr)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct page *page;
+ long ret = 0;
+
+ mmap_read_lock(mm);
+ if (binder_alloc_is_mapped(alloc))
+ ret = get_user_pages_remote(mm, addr, 1, 0, &page, NULL);
+ mmap_read_unlock(mm);
+
+ return ret > 0 ? page : NULL;
+}
+
+static int binder_page_insert(struct binder_alloc *alloc,
+ unsigned long addr,
+ struct page *page)
+{
+ struct mm_struct *mm = alloc->mm;
+ struct vm_area_struct *vma;
+ int ret = -ESRCH;
+
+ /* attempt per-vma lock first */
+ vma = lock_vma_under_rcu(mm, addr);
+ if (vma) {
+ if (binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ vma_end_read(vma);
+ return ret;
+ }
+
+ /* fall back to mmap_lock */
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, addr);
+ if (vma && binder_alloc_is_mapped(alloc))
+ ret = vm_insert_page(vma, addr, page);
+ mmap_read_unlock(mm);
+
+ return ret;
+}
+
static struct page *binder_page_alloc(struct binder_alloc *alloc,
unsigned long index,
unsigned long addr)
@@ -254,9 +296,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
unsigned long index,
unsigned long addr)
{
- struct vm_area_struct *vma;
struct page *page;
- long npages;
int ret;
if (!mmget_not_zero(alloc->mm))
@@ -268,16 +308,7 @@ static int binder_install_single_page(struct binder_alloc *alloc,
goto out;
}
- mmap_read_lock(alloc->mm);
- vma = vma_lookup(alloc->mm, addr);
- if (!vma || !binder_alloc_is_mapped(alloc)) {
- __free_page(page);
- pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
- ret = -ESRCH;
- goto unlock;
- }
-
- ret = vm_insert_page(vma, addr, page);
+ ret = binder_page_insert(alloc, addr, page);
switch (ret) {
case -EBUSY:
/*
@@ -287,8 +318,8 @@ static int binder_install_single_page(struct binder_alloc *alloc,
*/
ret = 0;
__free_page(page);
- npages = get_user_pages_remote(alloc->mm, addr, 1, 0, &page, NULL);
- if (npages <= 0) {
+ page = binder_page_lookup(alloc, addr);
+ if (!page) {
pr_err("%d: failed to find page at offset %lx\n",
alloc->pid, addr - alloc->vm_start);
ret = -ESRCH;
@@ -306,8 +337,6 @@ static int binder_install_single_page(struct binder_alloc *alloc,
ret = -ENOMEM;
break;
}
-unlock:
- mmap_read_unlock(alloc->mm);
out:
mmput_async(alloc->mm);
return ret;
--
2.47.0.277.g8800431eea-goog