From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 1/6] mm: kfence: simplify kfence pool initialization
Date: Tue, 28 Mar 2023 17:58:02 +0800
Message-Id: <20230328095807.7014-2-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>
There are three similar loops that initialize the kfence pool; merge them
into a single loop to simplify the code and make it more efficient.

Signed-off-by: Muchun Song
Reviewed-by: Marco Elver
---
 mm/kfence/core.c | 47 ++++++-----------------------------------
 1 file changed, 6 insertions(+), 41 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 7d01a2c76e80..de62a84d4830 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -539,35 +539,10 @@ static void rcu_guarded_free(struct rcu_head *h)
 static unsigned long kfence_init_pool(void)
 {
 	unsigned long addr = (unsigned long)__kfence_pool;
-	struct page *pages;
 	int i;
 
 	if (!arch_kfence_init_pool())
 		return addr;
-
-	pages = virt_to_page(__kfence_pool);
-
-	/*
-	 * Set up object pages: they must have PG_slab set, to avoid freeing
-	 * these as real pages.
-	 *
-	 * We also want to avoid inserting kfence_free() in the kfree()
-	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
-	 * enters __slab_free() slow-path.
-	 */
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
-
-		if (!i || (i % 2))
-			continue;
-
-		__folio_set_slab(slab_folio(slab));
-#ifdef CONFIG_MEMCG
-		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
-				   MEMCG_DATA_OBJCGS;
-#endif
-	}
-
 	/*
 	 * Protect the first 2 pages. The first page is mostly unnecessary, and
 	 * merely serves as an extended guard page. However, adding one
@@ -581,8 +556,9 @@ static unsigned long kfence_init_pool(void)
 		addr += PAGE_SIZE;
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
+		struct slab *slab = page_slab(virt_to_page(addr));
 
 		/* Initialize metadata. */
 		INIT_LIST_HEAD(&meta->list);
@@ -593,26 +569,15 @@ static unsigned long kfence_init_pool(void)
 
 		/* Protect the right redzone. */
 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-			goto reset_slab;
-
-		addr += 2 * PAGE_SIZE;
-	}
-
-	return 0;
-
-reset_slab:
-	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
-		struct slab *slab = page_slab(nth_page(pages, i));
+			return addr;
 
-		if (!i || (i % 2))
-			continue;
+		__folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
-		slab->memcg_data = 0;
+		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
 #endif
-		__folio_clear_slab(slab_folio(slab));
 	}
 
-	return addr;
+	return 0;
 }
 
 static bool __init kfence_init_pool_early(void)
-- 
2.11.0
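A note on the address arithmetic the merged loop relies on: each metadata
slot covers two consecutive pool pages, the object page at addr and its
right redzone at addr + PAGE_SIZE. The following minimal userspace sketch
(not kernel code; PAGE_SIZE, NUM_OBJECTS and the pool base are made-up
stand-ins) walks the current layout the same way the single loop does:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define NUM_OBJECTS 4			/* stands in for CONFIG_KFENCE_NUM_OBJECTS */

int main(void)
{
	unsigned long pool = 0x100000UL;	/* pretend base of __kfence_pool */
	unsigned long addr = pool;
	int i;

	/* The two leading pages are guard pages only. */
	for (i = 0; i < 2; i++, addr += PAGE_SIZE)
		printf("page %2lu: guard\n", (addr - pool) / PAGE_SIZE);

	/* One pass: object page at addr, right redzone at addr + PAGE_SIZE. */
	for (i = 0; i < NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
		printf("page %2lu: object, kfence_metadata[%d]\n",
		       (addr - pool) / PAGE_SIZE, i);
		printf("page %2lu: guard (right redzone)\n",
		       (addr - pool) / PAGE_SIZE + 1);
	}
	return 0;
}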
From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 2/6] mm: kfence: check kfence pool size at build time
Date: Tue, 28 Mar 2023 17:58:03 +0800
Message-Id: <20230328095807.7014-3-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>

Check the kfence pool size at build time to expose problems as early as
possible.

Signed-off-by: Muchun Song
---
 mm/kfence/core.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index de62a84d4830..6781af1dfa66 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -841,10 +841,9 @@ static int kfence_init_late(void)
 		return -ENOMEM;
 	__kfence_pool = page_to_virt(pages);
 #else
-	if (nr_pages > MAX_ORDER_NR_PAGES) {
-		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
-		return -EINVAL;
-	}
+	BUILD_BUG_ON_MSG(get_order(KFENCE_POOL_SIZE) > MAX_ORDER,
+			 "CONFIG_KFENCE_NUM_OBJECTS is too large for buddy allocator");
+
 	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
 	if (!__kfence_pool)
 		return -ENOMEM;
-- 
2.11.0
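The effect of this patch is to turn a runtime size check into a
compile-time one. A rough userspace analogue, using C11 _Static_assert in
place of the kernel's BUILD_BUG_ON_MSG and made-up numbers for the page
size, the allocator limit and the object count, looks like this:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define MAX_ORDER   10			/* stand-in for the buddy allocator limit */
#define NUM_OBJECTS 499			/* stand-in for CONFIG_KFENCE_NUM_OBJECTS */
#define POOL_SIZE   ((NUM_OBJECTS + 1) * 2 * PAGE_SIZE)

/*
 * Fails the build (instead of returning -EINVAL at runtime) when the pool
 * no longer fits in the largest contiguous allocation the allocator offers.
 */
_Static_assert(POOL_SIZE <= (PAGE_SIZE << MAX_ORDER),
	       "NUM_OBJECTS is too large for this allocator");

int main(void)
{
	printf("pool size: %lu pages\n", POOL_SIZE / PAGE_SIZE);
	return 0;
}

With these numbers the pool still fits in one maximum-order allocation, so
the program builds; raising NUM_OBJECTS past the limit makes the build fail
instead of failing at boot.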
From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 3/6] mm: kfence: make kfence_protect_page() void
Date: Tue, 28 Mar 2023 17:58:04 +0800
Message-Id: <20230328095807.7014-4-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>

arch_kfence_init_pool() makes sure the kfence pool is mapped with the base
page size (e.g. 4 KB), so the subsequent PTE lookup in
kfence_protect_page() always succeeds. Since kfence_protect_page() can
therefore only ever return true, make it void to simplify the code.
Signed-off-by: Muchun Song --- arch/arm/include/asm/kfence.h | 4 +- arch/arm64/include/asm/kfence.h | 4 +- arch/parisc/include/asm/kfence.h | 7 +- arch/powerpc/include/asm/kfence.h | 8 +-- arch/riscv/include/asm/kfence.h | 4 +- arch/s390/include/asm/kfence.h | 3 +- arch/x86/include/asm/kfence.h | 9 +-- mm/kfence/core.c | 142 +++++++++++++++++-----------------= ---- 8 files changed, 73 insertions(+), 108 deletions(-) diff --git a/arch/arm/include/asm/kfence.h b/arch/arm/include/asm/kfence.h index 7980d0f2271f..c30a5f8125e8 100644 --- a/arch/arm/include/asm/kfence.h +++ b/arch/arm/include/asm/kfence.h @@ -43,11 +43,9 @@ static inline bool arch_kfence_init_pool(void) return true; } =20 -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { set_memory_valid(addr, 1, !protect); - - return true; } =20 #endif /* __ASM_ARM_KFENCE_H */ diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfenc= e.h index a81937fae9f6..7717c6d98b6f 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -12,11 +12,9 @@ =20 static inline bool arch_kfence_init_pool(void) { return true; } =20 -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { set_memory_valid(addr, 1, !protect); - - return true; } =20 #ifdef CONFIG_KFENCE diff --git a/arch/parisc/include/asm/kfence.h b/arch/parisc/include/asm/kfe= nce.h index 6259e5ac1fea..290792009315 100644 --- a/arch/parisc/include/asm/kfence.h +++ b/arch/parisc/include/asm/kfence.h @@ -19,13 +19,10 @@ static inline bool arch_kfence_init_pool(void) } =20 /* Protect the given page and flush TLB. */ -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { pte_t *pte =3D virt_to_kpte(addr); =20 - if (WARN_ON(!pte)) - return false; - /* * We need to avoid IPIs, as we may get KFENCE allocations or faults * with interrupts disabled. 
@@ -37,8 +34,6 @@ static inline bool kfence_protect_page(unsigned long addr= , bool protect) set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); =20 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); - - return true; } =20 #endif /* _ASM_PARISC_KFENCE_H */ diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/k= fence.h index 6fd2b4d486c5..9d8502a7d0a4 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -21,16 +21,14 @@ static inline bool arch_kfence_init_pool(void) } =20 #ifdef CONFIG_PPC64 -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { struct page *page =3D virt_to_page(addr); =20 __kernel_map_pages(page, 1, !protect); - - return true; } #else -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { pte_t *kpte =3D virt_to_kpte(addr); =20 @@ -40,8 +38,6 @@ static inline bool kfence_protect_page(unsigned long addr= , bool protect) } else { pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0); } - - return true; } #endif =20 diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfenc= e.h index d887a54042aa..1299f47170b5 100644 --- a/arch/riscv/include/asm/kfence.h +++ b/arch/riscv/include/asm/kfence.h @@ -46,7 +46,7 @@ static inline bool arch_kfence_init_pool(void) return true; } =20 -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { pte_t *pte =3D virt_to_kpte(addr); =20 @@ -56,8 +56,6 @@ static inline bool kfence_protect_page(unsigned long addr= , bool protect) set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); =20 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); - - return true; } =20 #endif /* _ASM_RISCV_KFENCE_H */ diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h index d55ba878378b..6d7b3632d79c 100644 --- a/arch/s390/include/asm/kfence.h +++ b/arch/s390/include/asm/kfence.h @@ -33,10 +33,9 @@ static __always_inline void kfence_split_mapping(void) #endif } =20 -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { __kernel_map_pages(virt_to_page(addr), 1, !protect); - return true; } =20 #endif /* _ASM_S390_KFENCE_H */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index ff5c7134a37a..6ffd4a078a71 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -38,13 +38,9 @@ static inline bool arch_kfence_init_pool(void) } =20 /* Protect the given page and flush TLB. 
*/ -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline void kfence_protect_page(unsigned long addr, bool protect) { - unsigned int level; - pte_t *pte =3D lookup_address(addr, &level); - - if (WARN_ON(!pte || level !=3D PG_LEVEL_4K)) - return false; + pte_t *pte =3D virt_to_kpte(addr); =20 /* * We need to avoid IPIs, as we may get KFENCE allocations or faults @@ -65,7 +61,6 @@ static inline bool kfence_protect_page(unsigned long addr= , bool protect) preempt_disable(); flush_tlb_one_kernel(addr); preempt_enable(); - return true; } =20 #endif /* !MODULE */ diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 6781af1dfa66..5726bf2ae13c 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -229,14 +229,14 @@ static bool alloc_covered_contains(u32 alloc_stack_ha= sh) return true; } =20 -static bool kfence_protect(unsigned long addr) +static inline void kfence_protect(unsigned long addr) { - return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), = true)); + kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true); } =20 -static bool kfence_unprotect(unsigned long addr) +static inline void kfence_unprotect(unsigned long addr) { - return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), = false)); + kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false); } =20 static inline unsigned long metadata_to_pageaddr(const struct kfence_metad= ata *meta) @@ -531,30 +531,19 @@ static void rcu_guarded_free(struct rcu_head *h) kfence_guarded_free((void *)meta->addr, meta, false); } =20 -/* - * Initialization of the KFENCE pool after its allocation. - * Returns 0 on success; otherwise returns the address up to - * which partial initialization succeeded. - */ -static unsigned long kfence_init_pool(void) +static void kfence_init_pool(void) { unsigned long addr =3D (unsigned long)__kfence_pool; int i; =20 - if (!arch_kfence_init_pool()) - return addr; /* * Protect the first 2 pages. The first page is mostly unnecessary, and * merely serves as an extended guard page. However, adding one * additional page in the beginning gives us an even number of pages, * which simplifies the mapping of address to metadata index. */ - for (i =3D 0; i < 2; i++) { - if (unlikely(!kfence_protect(addr))) - return addr; - - addr +=3D PAGE_SIZE; - } + for (i =3D 0; i < 2; i++, addr +=3D PAGE_SIZE) + kfence_protect(addr); =20 for (i =3D 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr +=3D 2 * PAGE_SIZE= ) { struct kfence_metadata *meta =3D &kfence_metadata[i]; @@ -568,38 +557,33 @@ static unsigned long kfence_init_pool(void) list_add_tail(&meta->list, &kfence_freelist); =20 /* Protect the right redzone. */ - if (unlikely(!kfence_protect(addr + PAGE_SIZE))) - return addr; + kfence_protect(addr + PAGE_SIZE); =20 __folio_set_slab(slab_folio(slab)); #ifdef CONFIG_MEMCG slab->memcg_data =3D (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS; #endif } - - return 0; } =20 static bool __init kfence_init_pool_early(void) { - unsigned long addr; - if (!__kfence_pool) return false; =20 - addr =3D kfence_init_pool(); - - if (!addr) { - /* - * The pool is live and will never be deallocated from this point on. - * Ignore the pool object from the kmemleak phys object tree, as it would - * otherwise overlap with allocations returned by kfence_alloc(), which - * are registered with kmemleak through the slab post-alloc hook. 
- */ - kmemleak_ignore_phys(__pa(__kfence_pool)); - return true; - } + if (!arch_kfence_init_pool()) + goto free; =20 + kfence_init_pool(); + /* + * The pool is live and will never be deallocated from this point on. + * Ignore the pool object from the kmemleak phys object tree, as it would + * otherwise overlap with allocations returned by kfence_alloc(), which + * are registered with kmemleak through the slab post-alloc hook. + */ + kmemleak_ignore_phys(__pa(__kfence_pool)); + return true; +free: /* * Only release unprotected pages, and do not try to go back and change * page attributes due to risk of failing to do so as well. If changing @@ -607,27 +591,7 @@ static bool __init kfence_init_pool_early(void) * fails for the first page, and therefore expect addr=3D=3D__kfence_pool= in * most failure cases. */ - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)= __kfence_pool)); - __kfence_pool =3D NULL; - return false; -} - -static bool kfence_init_pool_late(void) -{ - unsigned long addr, free_size; - - addr =3D kfence_init_pool(); - - if (!addr) - return true; - - /* Same as above. */ - free_size =3D KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); -#ifdef CONFIG_CONTIG_ALLOC - free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PA= GE_SIZE); -#else - free_pages_exact((void *)addr, free_size); -#endif + memblock_free_late(__pa(__kfence_pool), KFENCE_POOL_SIZE); __kfence_pool =3D NULL; return false; } @@ -830,30 +794,50 @@ void __init kfence_init(void) kfence_init_enable(); } =20 -static int kfence_init_late(void) -{ - const unsigned long nr_pages =3D KFENCE_POOL_SIZE / PAGE_SIZE; #ifdef CONFIG_CONTIG_ALLOC - struct page *pages; +static inline void *kfence_pool_alloc(void) +{ + struct page *page =3D alloc_contig_pages(KFENCE_POOL_SIZE / PAGE_SIZE, + GFP_KERNEL, first_online_node, NULL); =20 - pages =3D alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NUL= L); - if (!pages) - return -ENOMEM; - __kfence_pool =3D page_to_virt(pages); + return page ? 
page_to_virt(page) : NULL; +} + +static inline void kfence_pool_free(const void *ptr) +{ + free_contig_range(page_to_pfn(virt_to_page(ptr)), KFENCE_POOL_SIZE / PAGE= _SIZE); +} #else +static inline void *kfence_pool_alloc(void) +{ BUILD_BUG_ON_MSG(get_order(KFENCE_POOL_SIZE) > MAX_ORDER, "CONFIG_KFENCE_NUM_OBJECTS is too large for buddy allocator"); =20 - __kfence_pool =3D alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); + return alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); +} + +static inline void kfence_pool_free(const void *ptr) +{ + free_pages_exact(virt_to_page(ptr), KFENCE_POOL_SIZE); +} +#endif + +static int kfence_init_late(void) +{ + if (__kfence_pool) + return 0; + + __kfence_pool =3D kfence_pool_alloc(); if (!__kfence_pool) return -ENOMEM; -#endif =20 - if (!kfence_init_pool_late()) { - pr_err("%s failed\n", __func__); + if (!arch_kfence_init_pool()) { + kfence_pool_free(__kfence_pool); + __kfence_pool =3D NULL; return -EBUSY; } =20 + kfence_init_pool(); kfence_init_enable(); kfence_debugfs_init(); =20 @@ -862,8 +846,8 @@ static int kfence_init_late(void) =20 static int kfence_enable_late(void) { - if (!__kfence_pool) - return kfence_init_late(); + if (kfence_init_late()) + return -ENOMEM; =20 WRITE_ONCE(kfence_enabled, true); queue_delayed_work(system_unbound_wq, &kfence_timer, 0); @@ -1054,8 +1038,9 @@ bool kfence_handle_page_fault(unsigned long addr, boo= l is_write, struct pt_regs if (!is_kfence_address((void *)addr)) return false; =20 - if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ - return kfence_unprotect(addr); /* ... unprotect and proceed. */ + /* If disabled at runtime ... unprotect and proceed. */ + if (!READ_ONCE(kfence_enabled)) + goto out; =20 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); =20 @@ -1079,7 +1064,7 @@ bool kfence_handle_page_fault(unsigned long addr, boo= l is_write, struct pt_regs } =20 if (!to_report) - goto out; + goto report; =20 raw_spin_lock_irqsave(&to_report->lock, flags); to_report->unprotected_page =3D addr; @@ -1093,7 +1078,7 @@ bool kfence_handle_page_fault(unsigned long addr, boo= l is_write, struct pt_regs } else { to_report =3D addr_to_metadata(addr); if (!to_report) - goto out; + goto report; =20 raw_spin_lock_irqsave(&to_report->lock, flags); error_type =3D KFENCE_ERROR_UAF; @@ -1105,7 +1090,7 @@ bool kfence_handle_page_fault(unsigned long addr, boo= l is_write, struct pt_regs */ } =20 -out: +report: if (to_report) { kfence_report_error(addr, is_write, regs, to_report, error_type); raw_spin_unlock_irqrestore(&to_report->lock, flags); @@ -1113,6 +1098,7 @@ bool kfence_handle_page_fault(unsigned long addr, boo= l is_write, struct pt_regs /* This may be a UAF or OOB access, but we can't be sure. */ kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID); } - - return kfence_unprotect(addr); /* Unprotect and let access proceed. 
*/
+out:
+	kfence_unprotect(addr);
+	return true;
 }
-- 
2.11.0
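The simplification pattern above can be shown with a standalone sketch (all
names and the fake page table below are illustrative, not kernel API): once
every pool page is guaranteed to be mapped at base-page granularity, the
protect helper has no failure path left, so a void signature lets callers
drop their error handling:

#include <stdbool.h>
#include <stdio.h>

#define POOL_PAGES 8

/* Fake per-page "present" bits; in this model a mapping always exists. */
static bool page_present[POOL_PAGES];

/* No failure path: the lookup cannot miss, so the function can be void. */
static void protect_page(unsigned int idx, bool protect)
{
	page_present[idx] = !protect;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < POOL_PAGES; i++)
		page_present[i] = true;

	/* Callers no longer check a return value before continuing. */
	protect_page(0, true);
	protect_page(3, true);

	for (i = 0; i < POOL_PAGES; i++)
		printf("page %u: %s\n", i, page_present[i] ? "accessible" : "protected");
	return 0;
}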
From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 4/6] mm: kfence: remove useless check for CONFIG_KFENCE_NUM_OBJECTS
Date: Tue, 28 Mar 2023 17:58:05 +0800
Message-Id: <20230328095807.7014-5-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>

CONFIG_KFENCE_NUM_OBJECTS is constrained by Kconfig to the range 1 to
65535, so it can never be equal to or smaller than 0. Remove the redundant
check to simplify the code.

Signed-off-by: Muchun Song
Reviewed-by: Alexander Potapenko
---
 mm/kfence/core.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5726bf2ae13c..41befcb3b069 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -115,7 +115,6 @@ EXPORT_SYMBOL(__kfence_pool);	/* Export for test modules. */
  * Per-object metadata, with one-to-one mapping of object metadata to
  * backing pages (in __kfence_pool).
  */
-static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
 struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
 /* Freelist with available objects. */
-- 
2.11.0
From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 5/6] mm: kfence: change kfence pool page layout
Date: Tue, 28 Mar 2023 17:58:06 +0800
Message-Id: <20230328095807.7014-6-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>

The original kfence pool layout (shown for a pool with 2 objects):

+------------+------------+------------+------------+------------+------------+
| guard page | guard page |   object   | guard page |   object   | guard page |
+------------+------------+------------+------------+------------+------------+
                          |                         |                         |
                          +----kfence_metadata[0]---+----kfence_metadata[1]---+

The comment says "the additional page in the beginning gives us an even
number of pages, which simplifies the mapping of address to metadata
index". However, removing the additional page does not complicate any of
the mapping calculations, so switch to the new layout below to save a page.
Also remove the KFENCE_ERROR_INVALID test, since that case cannot easily be
tested any more.

The new kfence pool layout (shown for a pool with 2 objects):

+------------+------------+------------+------------+------------+
| guard page |   object   | guard page |   object   | guard page |
+------------+------------+------------+------------+------------+
|                         |                         |
+----kfence_metadata[0]---+----kfence_metadata[1]---+

Signed-off-by: Muchun Song
---
 include/linux/kfence.h  |  8 ++------
 mm/kfence/core.c        | 40 ++++++++--------------------------------
 mm/kfence/kfence.h      |  2 +-
 mm/kfence/kfence_test.c | 14 --------------
 4 files changed, 11 insertions(+), 53 deletions(-)

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 726857a4b680..25b13a892717 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -19,12 +19,8 @@
 
 extern unsigned long kfence_sample_interval;
 
-/*
- * We allocate an even number of pages, as it simplifies calculations to map
- * address to metadata indices; effectively, the very first page serves as an
- * extended guard page, but otherwise has no special purpose.
- */
-#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
+/* The last page serves as an extended guard page. */
+#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS * 2 + 1) * PAGE_SIZE)
 extern char *__kfence_pool;
 
 DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 41befcb3b069..f205b860f460 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -240,24 +240,7 @@ static inline void kfence_unprotect(unsigned long addr)
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
-	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
-
-	/* The checks do not affect performance; only called from slow-paths. */
-
-	/* Only call with a pointer into kfence_metadata. */
-	if (KFENCE_WARN_ON(meta < kfence_metadata ||
-			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
-		return 0;
-
-	/*
-	 * This metadata object only ever maps to 1 page; verify that the stored
-	 * address is in the expected range.
-	 */
-	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
-		return 0;
-
-	return pageaddr;
+	return ALIGN_DOWN(meta->addr, PAGE_SIZE);
 }
 
 /*
@@ -535,34 +518,27 @@ static void kfence_init_pool(void)
 	unsigned long addr = (unsigned long)__kfence_pool;
 	int i;
 
-	/*
-	 * Protect the first 2 pages. The first page is mostly unnecessary, and
-	 * merely serves as an extended guard page. However, adding one
-	 * additional page in the beginning gives us an even number of pages,
-	 * which simplifies the mapping of address to metadata index.
-	 */
-	for (i = 0; i < 2; i++, addr += PAGE_SIZE)
-		kfence_protect(addr);
-
 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
 		struct kfence_metadata *meta = &kfence_metadata[i];
-		struct slab *slab = page_slab(virt_to_page(addr));
+		struct slab *slab = page_slab(virt_to_page(addr + PAGE_SIZE));
 
 		/* Initialize metadata. */
 		INIT_LIST_HEAD(&meta->list);
 		raw_spin_lock_init(&meta->lock);
 		meta->state = KFENCE_OBJECT_UNUSED;
-		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
+		meta->addr = addr + PAGE_SIZE;
 		list_add_tail(&meta->list, &kfence_freelist);
 
-		/* Protect the right redzone. */
-		kfence_protect(addr + PAGE_SIZE);
+		/* Protect the left redzone. */
+		kfence_protect(addr);
 
 		__folio_set_slab(slab_folio(slab));
 #ifdef CONFIG_MEMCG
 		slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS;
 #endif
 	}
+
+	kfence_protect(addr);
 }
 
 static bool __init kfence_init_pool_early(void)
@@ -1043,7 +1019,7 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
 
-	if (page_index % 2) {
+	if (page_index % 2 == 0) {
 		/* This is a redzone, report a buffer overflow. */
 		struct kfence_metadata *meta;
 		int distance = 0;
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 600f2e2431d6..249d420100a7 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -110,7 +110,7 @@ static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
 	 * __kfence_pool, in which case we would report an "invalid access"
 	 * error.
 	 */
-	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2);
 	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
 		return NULL;
 
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index b5d66a69200d..d479f9c8afb1 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -637,19 +637,6 @@ static void test_gfpzero(struct kunit *test)
 	KUNIT_EXPECT_FALSE(test, report_available());
 }
 
-static void test_invalid_access(struct kunit *test)
-{
-	const struct expect_report expect = {
-		.type = KFENCE_ERROR_INVALID,
-		.fn = test_invalid_access,
-		.addr = &__kfence_pool[10],
-		.is_write = false,
-	};
-
-	READ_ONCE(__kfence_pool[10]);
-	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
-}
-
 /* Test SLAB_TYPESAFE_BY_RCU works. */
 static void test_memcache_typesafe_by_rcu(struct kunit *test)
 {
@@ -787,7 +774,6 @@ static struct kunit_case kfence_test_cases[] = {
 	KUNIT_CASE(test_kmalloc_aligned_oob_write),
 	KUNIT_CASE(test_shrink_memcache),
 	KUNIT_CASE(test_memcache_ctor),
-	KUNIT_CASE(test_invalid_access),
 	KUNIT_CASE(test_gfpzero),
 	KUNIT_CASE(test_memcache_typesafe_by_rcu),
 	KUNIT_CASE(test_krealloc),
-- 
2.11.0
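To see why dropping the leading page does not complicate the
address-to-metadata mapping, the following standalone sketch (userspace C
with a made-up pool base, not kernel code) evaluates the old and new index
formulas from the hunks above for a two-object pool:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define NUM_OBJECTS 2

int main(void)
{
	unsigned long pool = 0x100000UL;	/* pretend __kfence_pool */
	int i;

	for (i = 0; i < NUM_OBJECTS; i++) {
		/*
		 * Old layout: [guard][guard][obj0][guard][obj1][guard]...
		 * object i lives in pool page 2 * i + 2.
		 */
		unsigned long old_addr = pool + (2 * i + 2) * PAGE_SIZE;
		/*
		 * New layout: [guard][obj0][guard][obj1][guard]...
		 * object i lives in pool page 2 * i + 1.
		 */
		unsigned long new_addr = pool + (2 * i + 1) * PAGE_SIZE;

		int old_index = (old_addr - pool) / (PAGE_SIZE * 2) - 1;
		int new_index = (new_addr - pool) / (PAGE_SIZE * 2);

		printf("object %d: old index %d, new index %d\n", i, old_index, new_index);
	}
	return 0;
}

Both layouts yield index 0 for the first object and index 1 for the second;
only the constant offset in the formula changes.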
From: Muchun Song
To: glider@google.com, elver@google.com, dvyukov@google.com, akpm@linux-foundation.org, jannh@google.com, sjpark@amazon.de, muchun.song@linux.dev
Cc: kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-kernel@vger.kernel.org, Muchun Song
Subject: [PATCH 6/6] mm: kfence: replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x)
Date: Tue, 28 Mar 2023 17:58:07 +0800
Message-Id: <20230328095807.7014-7-songmuchun@bytedance.com>
In-Reply-To: <20230328095807.7014-1-songmuchun@bytedance.com>
References: <20230328095807.7014-1-songmuchun@bytedance.com>

Replace ALIGN_DOWN(x, PAGE_SIZE) with PAGE_ALIGN_DOWN(x) to simplify
the code a bit.

Signed-off-by: Muchun Song
Reviewed-by: Marco Elver
---
 mm/kfence/core.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f205b860f460..dbfb79a4d624 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -230,17 +230,17 @@ static bool alloc_covered_contains(u32 alloc_stack_hash)
 
 static inline void kfence_protect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), true);
 }
 
 static inline void kfence_unprotect(unsigned long addr)
 {
-	kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
+	kfence_protect_page(PAGE_ALIGN_DOWN(addr), false);
 }
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	return ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	return PAGE_ALIGN_DOWN(meta->addr);
 }
 
 /*
@@ -308,7 +308,7 @@ static inline bool check_canary_byte(u8 *addr)
 /* __always_inline this to ensure we won't do an indirect call to fn. */
 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
 {
-	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
+	const unsigned long pageaddr = PAGE_ALIGN_DOWN(meta->addr);
 	unsigned long addr;
 
 	/*
@@ -455,7 +455,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 	}
 
 	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
-	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
+	kcsan_begin_scoped_access((void *)PAGE_ALIGN_DOWN((unsigned long)addr), PAGE_SIZE,
 				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
 				  &assert_page_exclusive);
 
@@ -464,7 +464,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
 
 	/* Restore page protection if there was an OOB access. */
 	if (meta->unprotected_page) {
-		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
+		memzero_explicit((void *)PAGE_ALIGN_DOWN(meta->unprotected_page), PAGE_SIZE);
 		kfence_protect(meta->unprotected_page);
 		meta->unprotected_page = 0;
 	}
-- 
2.11.0
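For completeness, the two spellings compute the same value; a quick
userspace check with local stand-in macros (mirroring, not reproducing,
the kernel definitions) is:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
/* Local stand-ins mirroring the kernel macros used in the patch. */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define PAGE_ALIGN_DOWN(x)	ALIGN_DOWN(x, PAGE_SIZE)

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* PAGE_ALIGN_DOWN(x) is just shorthand for ALIGN_DOWN(x, PAGE_SIZE). */
	assert(PAGE_ALIGN_DOWN(addr) == ALIGN_DOWN(addr, PAGE_SIZE));
	printf("0x%lx rounds down to 0x%lx\n", addr, PAGE_ALIGN_DOWN(addr));
	return 0;
}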