From: Kefeng Wang <wangkefeng.wang@huawei.com>
To: Andrew Morton, David Hildenbrand, Oscar Salvador, SeongJae Park
CC: Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: [PATCH -next 6/8] mm: damon: paddr: convert damon_pa_*() to use folios
Date: Tue, 13 Dec 2022 17:27:33 +0800
Message-ID: <20221213092735.187924-7-wangkefeng.wang@huawei.com>
X-Mailer: git-send-email 2.35.3
In-Reply-To: <20221213092735.187924-1-wangkefeng.wang@huawei.com>
References: <20221213092735.187924-1-wangkefeng.wang@huawei.com>

With damon_get_folio(), let's convert damon_pa_mkold(), damon_pa_young(),
damon_pa_pageout() and damon_pa_mark_accessed_or_deactivate() to use
folios.
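For reference, the callers below only rely on damon_get_folio() (added
earlier in this series) behaving roughly like the sketch that follows:
it returns the folio backing an online, LRU-resident pfn with a
reference taken, or NULL otherwise. This is an illustration of the
assumed contract, not the exact helper:

	/*
	 * Sketch only: return the folio for @pfn with a reference held,
	 * or NULL if the pfn is offline, a tail page, or not on the LRU.
	 */
	static struct folio *damon_get_folio(unsigned long pfn)
	{
		struct page *page = pfn_to_online_page(pfn);
		struct folio *folio;

		if (!page || PageTail(page))
			return NULL;

		folio = page_folio(page);
		if (!folio_test_lru(folio) || !folio_try_get(folio))
			return NULL;

		return folio;
	}

Each damon_pa_*() caller can then test the returned folio directly and
drop the intermediate struct page plus the page_folio() call.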
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/damon/paddr.c | 44 +++++++++++++++++++-------------------------
 1 file changed, 19 insertions(+), 25 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 6b36de1396a4..95d4686611a5 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 
 static void damon_pa_mkold(unsigned long paddr)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		folio_set_idle(folio);
@@ -93,7 +91,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
 	result->accessed = false;
-	result->page_sz = PAGE_SIZE;
+	result->page_sz = PAGE_SIZE * folio_nr_pages(folio);
 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
 		if (pvmw.pte) {
@@ -122,8 +120,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 
 static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 {
-	struct folio *folio;
-	struct page *page = damon_get_page(PHYS_PFN(paddr));
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct damon_pa_access_chk_result result = {
 		.page_sz = PAGE_SIZE,
 		.accessed = false,
@@ -135,9 +132,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	};
 	bool need_lock;
 
-	if (!page)
+	if (!folio)
 		return false;
-	folio = page_folio(page);
 
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		if (folio_test_idle(folio))
@@ -205,28 +201,28 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
 static unsigned long damon_pa_pageout(struct damon_region *r)
 {
 	unsigned long addr, applied;
-	LIST_HEAD(page_list);
+	LIST_HEAD(folio_list);
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
 
-		ClearPageReferenced(page);
-		test_and_clear_page_young(page);
-		if (isolate_lru_page(page)) {
-			put_page(page);
+		folio_clear_referenced(folio);
+		folio_test_clear_young(folio);
+		if (!folio_isolate_lru(folio)) {
+			folio_put(folio);
 			continue;
 		}
-		if (PageUnevictable(page)) {
-			putback_lru_page(page);
+		if (folio_test_unevictable(folio)) {
+			folio_putback_lru(folio);
 		} else {
-			list_add(&page->lru, &page_list);
-			put_page(page);
+			list_add(&folio->lru, &folio_list);
+			folio_put(folio);
 		}
 	}
-	applied = reclaim_pages(&page_list);
+	applied = reclaim_pages(&folio_list);
 	cond_resched();
 	return applied * PAGE_SIZE;
 }
@@ -237,12 +233,10 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 	unsigned long addr, applied = 0;
 
 	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
-		struct page *page = damon_get_page(PHYS_PFN(addr));
-		struct folio *folio;
+		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!page)
+		if (!folio)
 			continue;
-		folio = page_folio(page);
 
 		if (mark_accessed)
 			folio_mark_accessed(folio);
-- 
2.35.3