From: "zhaoyang.huang" <zhaoyang.huang@unisoc.com>
To: Andrew Morton, Roman Gushchin, Minchan Kim, Joonsoo Kim, Zhaoyang Huang
Subject: [PATCH 1/2] mm: optimization on page allocation when CMA enabled
Date: Mon, 8 May 2023 17:33:02 +0800
Message-ID: <1683538383-19685-2-git-send-email-zhaoyang.huang@unisoc.com>
In-Reply-To: <1683538383-19685-1-git-send-email-zhaoyang.huang@unisoc.com>
References: <1683538383-19685-1-git-send-email-zhaoyang.huang@unisoc.com>

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

Consider the series of scenarios below, with WMARK_LOW=25MB and
WMARK_MIN=5MB (1.9GB of managed pages). The current 'fixed 1/2 ratio'
policy only starts using CMA at scenario C, by which point the free
UNMOVABLE & RECLAIMABLE (U&R) pages have already dropped below
WMARK_LOW. That works against the current memory policy: U&R should
either stay around WMARK_LOW when there is no allocation pressure, or
trigger reclaim by entering the slowpath.

-- Free_pages
|
|
-- WMARK_LOW
|
-- Free_CMA
|
|
--

Free_CMA/Free_pages(MB)      A(12/30)     B(12/25)     C(12/20)
fixed 1/2 ratio                  N            N            Y
this commit                      Y            Y            Y
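To make the table concrete, here is a rough user-space sketch (for
illustration only, not part of the patch). It reduces the
zone_watermark_ok check without ALLOC_CMA to "non-CMA free memory stays
above WMARK_LOW", ignoring order, lowmem reserves and the other
corrections the real check applies.

#include <stdbool.h>
#include <stdio.h>

#define WMARK_LOW_MB	25

/* previous policy: use CMA once it holds more than half of free memory */
static bool cma_first_fixed_ratio(int free_cma, int free_total)
{
	return free_cma > free_total / 2;
}

/* this commit: use CMA as soon as U&R alone can no longer cover WMARK_LOW */
static bool cma_first_watermark(int free_cma, int free_total)
{
	/* simplified stand-in for !zone_watermark_ok(..., alloc_flags & ~ALLOC_CMA) */
	if (free_total - free_cma < WMARK_LOW_MB)
		return true;
	/* watermark still ok: keep the previous fixed 1/2 behaviour */
	return cma_first_fixed_ratio(free_cma, free_total);
}

int main(void)
{
	const int scenarios[3][2] = { { 12, 30 }, { 12, 25 }, { 12, 20 } }; /* A, B, C */

	for (int i = 0; i < 3; i++) {
		int cma = scenarios[i][0];
		int total = scenarios[i][1];

		printf("%c(%d/%d): fixed 1/2 ratio = %c, this commit = %c\n",
		       'A' + i, cma, total,
		       cma_first_fixed_ratio(cma, total) ? 'Y' : 'N',
		       cma_first_watermark(cma, total) ? 'Y' : 'N');
	}
	return 0;
}

Running the sketch reproduces the N/N/Y and Y/Y/Y rows of the table above.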
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
v2: do proportion check when zone_watermark_ok, update commit message
v3: update coding style and simplify the logic when zone_watermark_ok
---
 mm/page_alloc.c | 46 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 40 insertions(+), 6 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0745aed..7aca49d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3071,6 +3071,41 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 
 }
 
+#ifdef CONFIG_CMA
+/*
+ * A GFP_MOVABLE allocation could deplete UNMOVABLE & RECLAIMABLE page blocks
+ * with the help of CMA, which can then make GFP_KERNEL allocations fail. Check
+ * zone_watermark_ok again without ALLOC_CMA to decide whether to use CMA first.
+ */
+static bool __if_use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+	unsigned long watermark;
+	bool cma_first = false;
+
+	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+	/* check if the GFP_MOVABLE request only passed the previous zone_watermark_ok with the help of CMA */
+	if (!zone_watermark_ok(zone, order, watermark, 0, alloc_flags & (~ALLOC_CMA)))
+		/*
+		 * A failed watermark means UNMOVABLE & RECLAIMABLE pages are
+		 * running low; use CMA first so that they stay around the
+		 * corresponding watermark.
+		 */
+		cma_first = true;
+	else
+		/*
+		 * Keep the previous fixed 1/2 logic when the watermark is ok,
+		 * since the check above already provides the protection.
+		 */
+		cma_first = (zone_page_state(zone, NR_FREE_CMA_PAGES) >
+			zone_page_state(zone, NR_FREE_PAGES) / 2);
+	return cma_first;
+}
+#else
+static bool __if_use_cma_first(struct zone *zone, unsigned int order, unsigned int alloc_flags)
+{
+	return false;
+}
+#endif
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -3084,13 +3119,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA based on re-checking zone_watermark_ok,
+		 * i.e. whether the previous check only passed with the help of CMA.
 		 */
-		if (alloc_flags & ALLOC_CMA &&
-		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-			page = __rmqueue_cma_fallback(zone, order);
+		if (migratetype == MIGRATE_MOVABLE) {
+			page = __if_use_cma_first(zone, order, alloc_flags) ?
+				__rmqueue_cma_fallback(zone, order) : NULL;
 			if (page)
 				return page;
 		}
-- 
1.9.1

From: "zhaoyang.huang" <zhaoyang.huang@unisoc.com>
To: Andrew Morton, Roman Gushchin, Minchan Kim, Joonsoo Kim, Zhaoyang Huang
Subject: [PATCH 2/2] mm: skip CMA pages when they are not available
Date: Mon, 8 May 2023 17:33:03 +0800
Message-ID: <1683538383-19685-3-git-send-email-zhaoyang.huang@unisoc.com>
In-Reply-To: <1683538383-19685-1-git-send-email-zhaoyang.huang@unisoc.com>
References: <1683538383-19685-1-git-send-email-zhaoyang.huang@unisoc.com>

From: Zhaoyang Huang <zhaoyang.huang@unisoc.com>

This patch fixes unproductive reclaiming of CMA pages by skipping them
when they are not available to the current allocation context. It stems
from the OOM issue below, which was caused by a large proportion of
MIGRATE_CMA pages among the free pages. A previous commit (168676649)
already mitigates this by trying CMA pages first, instead of only as a
fallback, in rmqueue.

04166 < 4> [   36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0
0419C < 4> [   36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB
0419D < 4> [   36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB
......
041EA < 4> [   36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC)
041EB < 4> [   36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0
041EC < 4> [   36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0
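As a side note (an illustrative sketch under stated assumptions, not
taken from the patch): the failing requests in the log above use
GFP_NOIO and GFP_ATOMIC, neither of which carries __GFP_MOVABLE, so they
can never be served from MIGRATE_CMA pageblocks and gain nothing from
reclaiming CMA pages. A simplified, user-space version of the skip
predicate this patch adds to isolate_lru_folios() could look like the
following; the flag value is a placeholder, not the kernel's real
definition.

#include <stdbool.h>

#define __GFP_MOVABLE	0x08u	/* placeholder bit meaning "movable allocation" */

/*
 * CMA pageblocks can only serve movable allocations, so direct reclaim
 * triggered by a non-movable request (e.g. GFP_NOIO or GFP_ATOMIC) gains
 * nothing from isolating CMA folios. kswapd reclaims on behalf of every
 * consumer, so it keeps scanning them.
 */
static bool should_skip_cma_folio(unsigned int gfp_mask, bool is_kswapd,
				  bool folio_is_cma)
{
	bool cma_usable = is_kswapd || (gfp_mask & __GFP_MOVABLE);

	return folio_is_cma && !cma_usable;
}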
Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com>
---
 mm/vmscan.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637f..19fb445 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2225,10 +2225,16 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
+	bool cma_cap = true;
+	struct page *page;
 	LIST_HEAD(folios_skipped);
 
 	total_scan = 0;
 	scan = 0;
+	if (IS_ENABLED(CONFIG_CMA) && !current_is_kswapd()
+		&& gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE)
+		cma_cap = false;
+
 	while (scan < nr_to_scan && !list_empty(src)) {
 		struct list_head *move_to = src;
 		struct folio *folio;
@@ -2239,12 +2245,17 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
 		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (folio_zonenum(folio) > sc->reclaim_idx) {
+		page = &folio->page;
+
+		if ((folio_zonenum(folio) > sc->reclaim_idx)
+#ifdef CONFIG_CMA
+		    || (get_pageblock_migratetype(page) == MIGRATE_CMA && !cma_cap)
+#endif
+		) {
 			nr_skipped[folio_zonenum(folio)] += nr_pages;
 			move_to = &folios_skipped;
 			goto move;
 		}
-
 		/*
 		 * Do not count skipped folios because that makes the function
 		 * return with no isolated folios if the LRU mostly contains
-- 
1.9.1