From: Carlos Llamas <cmllamas@google.com>
To: Greg Kroah-Hartman, Arve Hjønnevåg, Todd Kjos, Martijn Coenen,
	Joel Fernandes, Christian Brauner, Carlos Llamas, Suren Baghdasaryan
Cc: linux-kernel@vger.kernel.org, kernel-team@android.com
Subject: [PATCH 19/21] binder: perform page allocation outside of locks
Date: Thu, 2 Nov 2023 18:59:20 +0000
Message-ID: <20231102185934.773885-20-cmllamas@google.com>
In-Reply-To: <20231102185934.773885-1-cmllamas@google.com>
References: <20231102185934.773885-1-cmllamas@google.com>
X-Mailer: git-send-email 2.42.0.869.gea05f2083d-goog
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

Split out the insertion of pages to be done outside of the alloc->mutex
in a separate binder_get_page_range() routine. Since this is no longer
serialized with other requests, we need to make sure we look at the
full range of pages for this buffer, including those shared with
neighboring buffers. The insertion of pages into the vma is still
serialized with the mmap write lock.

Besides avoiding unnecessary nested locking, this helps in preparation
for switching the alloc->mutex to a spinlock_t in subsequent patches.

Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
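A note for reviewers on the "full range of pages" point above: the
following is a rough standalone userspace sketch of the page math,
assuming 4K pages and made-up buffer addresses, and simplifying the
end-address clamping that binder_alloc_new_buf_locked() does via
has_page_addr. It is only meant to show why binder_get_page_range()
must also cover the edge pages shared with neighboring buffers, while
binder_allocate_page_range() covers only the pages owned exclusively
by this buffer:

/*
 * Illustrative userspace sketch only -- not driver code. Assumes 4K
 * pages; the buffer addresses are invented for the example.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long user_data = 0x1f00;	/* buffer starts mid-page */
	unsigned long size = 0x300;		/* and ends mid-page */

	/* full range, shared edge pages included: what must be present */
	printf("get:      %#lx-%#lx\n",
	       user_data & PAGE_MASK, PAGE_ALIGN(user_data + size));

	/* exclusively owned range only: what gets allocated/accounted */
	printf("allocate: %#lx-%#lx\n",
	       PAGE_ALIGN(user_data), (user_data + size) & PAGE_MASK);
	return 0;
}

With these example values the exclusive range comes out empty
(0x2000-0x2000): both pages the buffer touches are edge pages
potentially shared with neighbors, which is exactly the range the
unserialized path has to re-check.
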
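The other half of the change is the smp_store_release()/
smp_load_acquire() pairing on page->page_ptr. As a review aid, a
minimal userspace analogue of that publish/observe pattern (a hedged
sketch, with C11 atomics standing in for the kernel primitives and all
names invented) looks like:

/*
 * Illustrative userspace sketch only -- not binder code. C11 atomics
 * stand in for smp_store_release()/smp_load_acquire().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct page_slot {
	_Atomic(int *) page_ptr;	/* NULL until insertion completes */
};

static struct page_slot slot;

static void *inserter(void *arg)
{
	int *page = malloc(sizeof(*page));

	*page = 42;	/* fully "insert" the page first... */
	/* ...then publish: release orders the fill before the store */
	atomic_store_explicit(&slot.page_ptr, page, memory_order_release);
	return arg;
}

static void *checker(void *arg)
{
	/*
	 * Acquire pairs with the release above: observing the pointer
	 * guarantees observing the completed insertion behind it.
	 */
	int *page = atomic_load_explicit(&slot.page_ptr,
					 memory_order_acquire);

	if (page)	/* already inserted: skip, as in the fast path */
		printf("page present, value %d\n", *page);
	else		/* not yet: would take the slow, locked path */
		printf("page absent\n");
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, inserter, NULL);
	pthread_create(&b, NULL, checker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	free(atomic_load(&slot.page_ptr));
	return 0;
}

The point is only that a reader which observes page_ptr non-NULL
through the acquire load can use the page without taking the mmap
lock, which is what lets binder_get_page_range() skip pages that are
already present.
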
 drivers/android/binder_alloc.c | 85 ++++++++++++++++++++++------------
 1 file changed, 56 insertions(+), 29 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 7e0af1786b20..e739be7f2dd4 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -194,6 +194,9 @@ static void binder_free_page_range(struct binder_alloc *alloc,
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
+		if (!page->page_ptr)
+			continue;
+
 		trace_binder_free_lru_start(alloc, index);
 
 		ret = list_lru_add(&binder_alloc_lru, &page->lru);
@@ -214,6 +217,9 @@ static int binder_get_user_page_remote(struct binder_alloc *alloc,
 		return -ESRCH;
 
 	mmap_write_lock(alloc->mm);
+	if (lru_page->page_ptr)
+		goto out;
+
 	if (!alloc->vma) {
 		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
 		ret = -ESRCH;
@@ -236,32 +242,64 @@ static int binder_get_user_page_remote(struct binder_alloc *alloc,
 		goto out;
 	}
 
-	lru_page->page_ptr = page;
+	/* mark page insertion complete and safe to acquire */
+	smp_store_release(&lru_page->page_ptr, page);
 out:
 	mmap_write_unlock(alloc->mm);
 	mmput_async(alloc->mm);
 	return ret;
 }
 
-static int binder_allocate_page_range(struct binder_alloc *alloc,
-				      unsigned long start, unsigned long end)
+/* The range of pages should include those shared with other buffers */
+static int binder_get_page_range(struct binder_alloc *alloc,
+				 unsigned long start, unsigned long end)
 {
 	struct binder_lru_page *page;
 	unsigned long page_addr;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			   "%d: allocate pages %lx-%lx\n",
+			   "%d: get pages %lx-%lx\n",
 			   alloc->pid, start, end);
 
-	if (end <= start)
-		return 0;
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		unsigned long index;
+		int ret;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		/* check if page insertion is marked complete by release */
+		if (smp_load_acquire(&page->page_ptr))
+			continue;
+
+		trace_binder_alloc_page_start(alloc, index);
+
+		ret = binder_get_user_page_remote(alloc, page, page_addr);
+		if (ret)
+			return ret;
+
+		trace_binder_alloc_page_end(alloc, index);
+	}
+
+	return 0;
+}
+
+/* The range of pages should exclude those shared with other buffers */
+static void binder_allocate_page_range(struct binder_alloc *alloc,
+				       unsigned long start, unsigned long end)
+{
+	struct binder_lru_page *page;
+	unsigned long page_addr;
+
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			   "%d: allocate pages %lx-%lx\n",
+			   alloc->pid, start, end);
 
 	trace_binder_update_page_range(alloc, true, start, end);
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		unsigned long index;
 		bool on_lru;
-		int ret;
 
 		index = (page_addr - alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
@@ -276,21 +314,9 @@ static int binder_allocate_page_range(struct binder_alloc *alloc,
 			continue;
 		}
 
-		trace_binder_alloc_page_start(alloc, index);
-
-		ret = binder_get_user_page_remote(alloc, page, page_addr);
-		if (ret) {
-			binder_free_page_range(alloc, start, page_addr);
-			return ret;
-		}
-
 		if (index + 1 > alloc->pages_high)
 			alloc->pages_high = index + 1;
-
-		trace_binder_alloc_page_end(alloc, index);
 	}
-
-	return 0;
 }
 
 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
@@ -410,7 +436,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	unsigned long has_page_addr;
 	unsigned long end_page_addr;
 	size_t buffer_size;
-	int ret;
 
 	if (is_async &&
 	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
@@ -453,18 +478,14 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		      "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      alloc->pid, size, buffer, buffer_size);
 
-	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
 	WARN_ON(n && buffer_size != size);
+
+	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
 	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
-					 end_page_addr);
-	if (ret) {
-		buffer = ERR_PTR(ret);
-		goto out;
-	}
-
+	binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
+				   end_page_addr);
 	if (buffer_size != size) {
 		new_buffer->user_data = buffer->user_data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
@@ -491,7 +512,6 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 		buffer->oneway_spam_suspect = true;
 	}
 
-out:
 	/* discard possibly unused new_buffer */
 	kfree(new_buffer);
 	return buffer;
@@ -520,6 +540,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer, *next;
 	size_t size;
+	int ret;
 
 	/* Check binder_alloc is fully initialized */
 	if (!binder_alloc_get_vma(alloc)) {
@@ -564,6 +585,12 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = current->tgid;
 
+	ret = binder_get_page_range(alloc, buffer->user_data & PAGE_MASK,
+				    PAGE_ALIGN(buffer->user_data + size));
+	if (ret) {
+		binder_alloc_free_buf(alloc, buffer);
+		buffer = ERR_PTR(ret);
+	}
 out:
 	return buffer;
 }
-- 
2.42.0.869.gea05f2083d-goog