Date: Fri, 1 Dec 2023 17:21:45 +0000
In-Reply-To: <20231201172212.1813387-1-cmllamas@google.com>
References: <20231201172212.1813387-1-cmllamas@google.com>
Message-ID: <20231201172212.1813387-17-cmllamas@google.com>
Subject: [PATCH v2 16/28] binder: refactor page range allocation
From: Carlos Llamas
To: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos, Martijn Coenen,
	Joel Fernandes, Christian Brauner, Carlos Llamas, Suren Baghdasaryan
Cc: linux-kernel@vger.kernel.org, kernel-team@android.com, Alice Ryhl
charset="utf-8" Instead of looping through the page range twice to first determine if the mmap lock is required, simply do it per-page as needed. Split out all this logic into a separate binder_install_single_page() function. Reviewed-by: Alice Ryhl Signed-off-by: Carlos Llamas --- drivers/android/binder_alloc.c | 107 +++++++++++++++------------------ 1 file changed, 47 insertions(+), 60 deletions(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 99eacd8782b8..1caf0e3d3451 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -199,14 +199,51 @@ static void binder_free_page_range(struct binder_allo= c *alloc, } } =20 +static int binder_install_single_page(struct binder_alloc *alloc, + struct binder_lru_page *lru_page, + unsigned long addr) +{ + struct page *page; + int ret =3D 0; + + if (!mmget_not_zero(alloc->mm)) + return -ESRCH; + + mmap_write_lock(alloc->mm); + if (!alloc->vma) { + pr_err("%d: %s failed, no vma\n", alloc->pid, __func__); + ret =3D -ESRCH; + goto out; + } + + page =3D alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (!page) { + pr_err("%d: failed to allocate page\n", alloc->pid); + ret =3D -ENOMEM; + goto out; + } + + ret =3D vm_insert_page(alloc->vma, addr, page); + if (ret) { + pr_err("%d: %s failed to insert page at %lx with %d\n", + alloc->pid, __func__, addr, ret); + __free_page(page); + ret =3D -ENOMEM; + goto out; + } + + lru_page->page_ptr =3D page; +out: + mmap_write_unlock(alloc->mm); + mmput_async(alloc->mm); + return ret; +} + static int binder_allocate_page_range(struct binder_alloc *alloc, unsigned long start, unsigned long end) { - struct vm_area_struct *vma =3D NULL; struct binder_lru_page *page; - struct mm_struct *mm =3D NULL; unsigned long page_addr; - bool need_mm =3D false; =20 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: allocate pages %lx-%lx\n", @@ -218,32 +255,9 @@ static int binder_allocate_page_range(struct binder_al= loc *alloc, trace_binder_update_page_range(alloc, true, start, end); =20 for (page_addr =3D start; page_addr < end; page_addr +=3D PAGE_SIZE) { - page =3D &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; - if (!page->page_ptr) { - need_mm =3D true; - break; - } - } - - if (need_mm && mmget_not_zero(alloc->mm)) - mm =3D alloc->mm; - - if (mm) { - mmap_write_lock(mm); - vma =3D alloc->vma; - } - - if (!vma && need_mm) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - alloc->pid); - goto err_no_vma; - } - - for (page_addr =3D start; page_addr < end; page_addr +=3D PAGE_SIZE) { - int ret; + unsigned long index; bool on_lru; - size_t index; + int ret; =20 index =3D (page_addr - alloc->buffer) / PAGE_SIZE; page =3D &alloc->pages[index]; @@ -258,26 +272,15 @@ static int binder_allocate_page_range(struct binder_a= lloc *alloc, continue; } =20 - if (WARN_ON(!vma)) - goto err_page_ptr_cleared; - trace_binder_alloc_page_start(alloc, index); - page->page_ptr =3D alloc_page(GFP_KERNEL | - __GFP_HIGHMEM | - __GFP_ZERO); - if (!page->page_ptr) { - pr_err("%d: binder_alloc_buf failed for page at %lx\n", - alloc->pid, page_addr); - goto err_alloc_page_failed; - } + page->alloc =3D alloc; INIT_LIST_HEAD(&page->lru); =20 - ret =3D vm_insert_page(vma, page_addr, page->page_ptr); + ret =3D binder_install_single_page(alloc, page, page_addr); if (ret) { - pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", - alloc->pid, page_addr); - goto err_vm_insert_page_failed; + 
binder_free_page_range(alloc, start, page_addr); + return ret; } =20 if (index + 1 > alloc->pages_high) @@ -285,24 +288,8 @@ static int binder_allocate_page_range(struct binder_al= loc *alloc, =20 trace_binder_alloc_page_end(alloc, index); } - if (mm) { - mmap_write_unlock(mm); - mmput_async(mm); - } - return 0; =20 -err_vm_insert_page_failed: - __free_page(page->page_ptr); - page->page_ptr =3D NULL; -err_alloc_page_failed: -err_page_ptr_cleared: - binder_free_page_range(alloc, start, page_addr); -err_no_vma: - if (mm) { - mmap_write_unlock(mm); - mmput_async(mm); - } - return vma ? -ENOMEM : -ESRCH; + return 0; } =20 static inline void binder_alloc_set_vma(struct binder_alloc *alloc, --=20 2.43.0.rc2.451.g8631bc7472-goog
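
For readers skimming the diff, here is a minimal standalone sketch of the
control-flow change. It is not kernel code and not part of the patch: every
name in it (fake_alloc, fake_page, install_single_page, take_mmap_lock, ...)
is invented purely for illustration. It only mirrors the shape of the
refactor, where the range loop no longer takes the lock up front and instead
delegates to a per-page helper that acquires and releases it as needed.

/*
 * Illustrative sketch only -- not kernel code. All types and helpers here
 * are made up to show the per-page install pattern described above.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096UL

struct fake_page {
	bool installed;
};

struct fake_alloc {
	unsigned long buffer;		/* base address of the mapping   */
	struct fake_page pages[16];	/* per-page bookkeeping          */
};

/* stand-ins for mmap_write_lock()/mmap_write_unlock() */
static void take_mmap_lock(void)    { }
static void release_mmap_lock(void) { }

/* analogue of binder_install_single_page(): lock, install, unlock */
static int install_single_page(struct fake_alloc *alloc,
			       struct fake_page *page, unsigned long addr)
{
	(void)alloc;			/* unused in this toy version    */

	take_mmap_lock();
	page->installed = true;		/* the vm_insert_page() step     */
	release_mmap_lock();

	printf("installed page at %#lx\n", addr);
	return 0;
}

/* analogue of binder_allocate_page_range(): one pass, per-page work */
static int allocate_page_range(struct fake_alloc *alloc,
			       unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += FAKE_PAGE_SIZE) {
		size_t index = (addr - alloc->buffer) / FAKE_PAGE_SIZE;
		struct fake_page *page = &alloc->pages[index];
		int ret;

		if (page->installed)	/* already mapped, nothing to do */
			continue;

		ret = install_single_page(alloc, page, addr);
		if (ret)
			return ret;	/* caller would unwind the range */
	}
	return 0;
}

int main(void)
{
	struct fake_alloc alloc = { .buffer = 0x100000 };

	/* install four fake pages covering 0x100000-0x104000 */
	return allocate_page_range(&alloc, 0x100000, 0x104000);
}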