From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 206672208B for ; Wed, 20 Mar 2024 07:37:40 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920261; cv=none; b=NDfuqzf1NeUNzYAgM9wE2E5oEUgcbkbTx4WQ6J3YRRcPV34/8Ursv267vaRdM3GoG82+7FzOlc7HdSYwJ1p4l58u0IsQKFygLhYOmW4Rw9+r7C81vuiFFoXQGc4fYCbc/rRb3oOpSaK3s/jkBS/FJvFJXi3eEn25RooGqT/Ub7k= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920261; c=relaxed/simple; bh=hzC9nwLvXTVgwQOMkdq44QZXi1tFep4qsRHVCT+3k4Q=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Vw3iRTdZ5dasSspZPjkz5q+f8zvIzWgKaogyVB+kYeX++pAIMhFs3q2SDH3jJ0yymKSR+NGJ+5kl5pdqTQ1kNaKbLgujL6oSUyqtZc6pD3slZ02po54F+MZFYLmrWnuCddCDDy1vwReVpjXBcBiMqmllyAyAZ8LvW7F4CoSDUG4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=lrtdz9EN; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="lrtdz9EN" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3238BC43399; Wed, 20 Mar 2024 07:37:37 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920260; bh=hzC9nwLvXTVgwQOMkdq44QZXi1tFep4qsRHVCT+3k4Q=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=lrtdz9ENRa11JSKhFuvNFzK8VDpY0QZ3x4rb6MBEa4oyMr2NJgCWpbHV2jZYw40bR o+z3HRQmduCdwuPMRFCiwdzy6MGkVlEhjTWCTGNYqu7oxcFus/w9HLX0YwV8uDh04h LqiI/CIgTKp/fa+5dnIicIq1hBK7r/Bdj41woNUyiRQVRrHmwoMRNKzSXt64RKgzyn p/CBIhtJBRbJtxgEIVB1GFVMzThQDWNeS2DCDX1aXc6SdcK1MXKOFDNN+2z8JuEzkQ NWaikWuHwAh6prjmxQyspSNviArYSxO43vZLCD5/cZuz28n+XaeKJc9r+l7JG9GlYp ttRhrdmd7DzPQ== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 01/11] mm/ksm: Convert get_ksm_page to return a folio Date: Wed, 20 Mar 2024 15:40:37 +0800 Message-ID: <20240320074049.4130552-2-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" The ksm only contains single pages, so use folio instead of pages to save a couple of compound_head calls. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 8c001819cf10..fda291b054c2 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -915,10 +915,10 @@ enum get_ksm_page_flags { * a page to put something that might look like our key in page->mapping. * is on its way to being freed; but it is an anomaly to bear in mind. 
*/ -static struct page *get_ksm_page(struct ksm_stable_node *stable_node, +static void *get_ksm_page(struct ksm_stable_node *stable_node, enum get_ksm_page_flags flags) { - struct page *page; + struct folio *folio; void *expected_mapping; unsigned long kpfn; =20 @@ -926,8 +926,8 @@ static struct page *get_ksm_page(struct ksm_stable_node= *stable_node, PAGE_MAPPING_KSM); again: kpfn =3D READ_ONCE(stable_node->kpfn); /* Address dependency. */ - page =3D pfn_to_page(kpfn); - if (READ_ONCE(page->mapping) !=3D expected_mapping) + folio =3D pfn_folio(kpfn); + if (READ_ONCE(folio->mapping) !=3D expected_mapping) goto stale; =20 /* @@ -940,7 +940,7 @@ static struct page *get_ksm_page(struct ksm_stable_node= *stable_node, * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. */ - while (!get_page_unless_zero(page)) { + while (!folio_try_get(folio)) { /* * Another check for page->mapping !=3D expected_mapping would * work here too. We have chosen the !PageSwapCache test to @@ -949,32 +949,32 @@ static struct page *get_ksm_page(struct ksm_stable_no= de *stable_node, * in the ref_freeze section of __remove_mapping(); but Anon * page->mapping reset to NULL later, in free_pages_prepare(). */ - if (!PageSwapCache(page)) + if (!folio_test_swapcache(folio)) goto stale; cpu_relax(); } =20 - if (READ_ONCE(page->mapping) !=3D expected_mapping) { - put_page(page); + if (READ_ONCE(folio->mapping) !=3D expected_mapping) { + folio_put(folio); goto stale; } =20 if (flags =3D=3D GET_KSM_PAGE_TRYLOCK) { - if (!trylock_page(page)) { - put_page(page); + if (!folio_trylock(folio)) { + folio_put(folio); return ERR_PTR(-EBUSY); } } else if (flags =3D=3D GET_KSM_PAGE_LOCK) - lock_page(page); + folio_lock(folio); =20 if (flags !=3D GET_KSM_PAGE_NOLOCK) { - if (READ_ONCE(page->mapping) !=3D expected_mapping) { - unlock_page(page); - put_page(page); + if (READ_ONCE(folio->mapping) !=3D expected_mapping) { + folio_unlock(folio); + folio_put(folio); goto stale; } } - return page; + return folio; =20 stale: /* --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0CC3EAD49 for ; Wed, 20 Mar 2024 07:37:43 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920264; cv=none; b=V9hasQVGfj/+qDOVabAOAdx0fjipcYLoFkWQmWIx2lf8msMCG8/jDKjnrL5oPNL0M2zye1nh/z39eGfkSSCI+FMmTI8DalhxwNuz8NoatEZ6hcq4G2CDUtA/9IPkoeX2yCM31UivMbrVFq6CeXjsnV/mO5GqlRNjXvwNEd9Rug8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920264; c=relaxed/simple; bh=ogVdYvzWIODvepKssBDhcBjAFm2MyCuzb8iad0ikR9w=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=YTfK9ThmhNqxmrpKD+85tWn6oow1AgcVwJJU5orNt7JjIVeSzqCgsRhwtVNZcLAEAOwJYWBXGDR0RilGUXBEnNbXODK65c9hN4f8y3k4BOKN1z/dh7XlBxY00+WjneLN3AYdkCryrgDNZLkmrGi4e+lkv1w1vIkj/ZzJr/eks40= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=UxsXOdDy; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="UxsXOdDy" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 
565C8C433F1; Wed, 20 Mar 2024 07:37:41 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920263; bh=ogVdYvzWIODvepKssBDhcBjAFm2MyCuzb8iad0ikR9w=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=UxsXOdDyeQz/LeOFJ5thcrVWPFUWSF0xAf89HiGMSe1y1BQ5CAtLaFvVrMHInOuCX n08x7z24gQKQo1Cys2BHt6sOQ0roKLbru2L4miwJv5QpKzMF0f31/Lm2LEH6kR523t 20RATbtX0L90o6l3JGyxonZhk3vkJOCXrcafdQzlPDTPVbAFVtXtB6Tv1vhu/iEyVG G7HhTffwrFbNX6CMK4TGBGryk31ciBggJCmbLm/iNrVqu7JvPkUAnhs0cNnxl4w+p9 pej6aCsV5pTOSmF5hEJTHfIxd7MPpkAe3JtPHU6COh7OhEJLXeBnUgbRwgXSco1Jsf EYGDO6JVtUAnw== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 02/11] mm/ksm: use a folio in remove_rmap_item_from_tree Date: Wed, 20 Mar 2024 15:40:38 +0800 Message-ID: <20240320074049.4130552-3-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Save 2 compound_head calls. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index fda291b054c2..922e33500875 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -998,16 +998,16 @@ static void remove_rmap_item_from_tree(struct ksm_rma= p_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct ksm_stable_node *stable_node; - struct page *page; + struct folio *folio; =20 stable_node =3D rmap_item->head; - page =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); - if (!page) + folio =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); + if (!folio) goto out; =20 hlist_del(&rmap_item->hlist); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); =20 if (!hlist_empty(&stable_node->hlist)) ksm_pages_sharing--; --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 054EF224D1 for ; Wed, 20 Mar 2024 07:37:46 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920267; cv=none; b=AdvsuNoXXK9Ggb3GuNUhaAEKScp70B65mTfqbbP/jbRJ8Bm79OrgRi/VzG7wSLWu8rr9NFzlrvHw9vBhVMSN0N+lDO2oIEzYK68OT6lPvrX6napLXHWVLy3M7/SacU6+rW6pTz+DeRjjqZFJZBqXe+nh5PB9Vk8y55AC6/MoZzA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920267; c=relaxed/simple; bh=QBm6pSuc3VKXpcso3yJosqmwb8/BJ2OoR9I3H0hBrRE=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=YNeby5+HYcRNTvXJmc2OVLrxjCWQzkFVNMuM6z+TJ/9VUgUXRwTzL6SfmjBGfc0/h8Op2kGOD+Q/PAjlpB0Vw5tPrKlgcf3FtB/R55D7LlZSEHeId5VO52v+51TIuBYLFc6CcuS5NCgR3qIPiDk1jqULe/2miQ0UFjTdeCrfNq8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass 
(2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=L37WiUcZ; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="L37WiUcZ" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 52E33C433C7; Wed, 20 Mar 2024 07:37:44 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920266; bh=QBm6pSuc3VKXpcso3yJosqmwb8/BJ2OoR9I3H0hBrRE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=L37WiUcZJsGDbOCHOPrSNTjyO0gSDx/iv+H7FACJLgcKsiZeTGrjxxkD+tePXxwzO WU/+a1n3c+tMsi/tk2O8Gzlwg/FAiUawPXh7x/4BAkeKr8YTmc9fE6ielqPWONuj93 Ww0aiZc8FfLVgEa2KjV3ORGARjUO3JItK016YlNbAU6e2v0Dc/fFd8+ab5keQ7xi8S aS9uwYzTy+ZEcrELKJecDguNV5953wDpPrZ+/U6DcP4heEe6vamrHAOEzr8kP02UQt uwZ1NaS2JnE/kpLYI1IEnYWxb+eaRQHQN5SL64g94xDcd63nVvXdoLNlqfqL8bJDhg mrQj1OmnP2LNQ== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 03/11] mm/ksm: use a folio in remove_stable_node Date: Wed, 20 Mar 2024 15:40:39 +0800 Message-ID: <20240320074049.4130552-4-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" pages in stable tree are all single normal page, use folios could save 3 calls to compound_head(). Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 922e33500875..9ea9b5ac44b4 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1107,11 +1107,11 @@ static inline void set_page_stable_node(struct page= *page, */ static int remove_stable_node(struct ksm_stable_node *stable_node) { - struct page *page; + struct folio *folio; int err; =20 - page =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); - if (!page) { + folio =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); + if (!folio) { /* * get_ksm_page did remove_node_from_stable_tree itself. */ @@ -1124,22 +1124,22 @@ static int remove_stable_node(struct ksm_stable_nod= e *stable_node) * merge_across_nodes/max_page_sharing be switched. */ err =3D -EBUSY; - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { /* * The stable node did not yet appear stale to get_ksm_page(), - * since that allows for an unmapped ksm page to be recognized + * since that allows for an unmapped ksm folio to be recognized * right up until it is freed; but the node is safe to remove. - * This page might be in an LRU cache waiting to be freed, + * This folio might be in an LRU cache waiting to be freed, * or it might be PageSwapCache (perhaps under writeback), * or it might have been removed from swapcache a moment ago. 
*/ - set_page_stable_node(page, NULL); + set_page_stable_node(&folio->page, NULL); remove_node_from_stable_tree(stable_node); err =3D 0; } =20 - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return err; } =20 --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 3F79D381BB for ; Wed, 20 Mar 2024 07:37:49 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920270; cv=none; b=eal3VOeypM6LpbZAD3R1NR9YPmMt5gkP8AchD6oo2aEvlEEYW+Lj+2JjcWOJ1jkykCp3hs55r8HRCVV5vg+oqEwWcUW0XFyIl3Kky0KjNQnAq6LUyhYyhEnjEd5MzSJjbeGFUvBzUvpMSNnpVVaX8XD9EaL4bDxbxAhlCmDqrpc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920270; c=relaxed/simple; bh=o9GRn+UUdxaobJAptmjj5VgVz5sUBp61iu84lZ36vgk=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=OOfAAm0y7Wfj3yp4qbvKMvIFNghtgmXmSTYuFeu+BcFxd3aN7eEE2wFvKsGKkxAE/+ibcMe9e+T6ZFbSd0/teTrKb7dgAKDeHt04eBxSyfqelosrPqJFvarjGSZInDFwEdo7WMNGhFAxUa6EQuK7zJWEm/elJzAsryadaorMlJo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=YI8Hv6Xm; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="YI8Hv6Xm" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 5001CC43394; Wed, 20 Mar 2024 07:37:47 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920269; bh=o9GRn+UUdxaobJAptmjj5VgVz5sUBp61iu84lZ36vgk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=YI8Hv6Xm0jtVWQtKp1cEyat0wmWHkY/j8Kn9DULx7rBhfbctCAntP1sNzBbd/Bgxf GKK43+qd3J6n4vPv1LSUBWx7F0jGccn4GAjoLM7vsPEws0j8wqHcpLDkSqObqPUuVx S00afiEWDkzZAspltpRvm6n9CzWLkZZD+sF6Yv437TNRj14g+lzb+lT9VmbZKVVdi0 S0FJt5H5PXEH4w1WFjdRasWZANKxZgTMKd0TGJwcL18EO4qG0Z+afEB0r2hCI/K5mL QKchWwWwSDnOFXDVHupER88Io9H7AnuzjlMcmemfsbxURM5CC2DJV+0FmKEL2WDgAX 1i5wfHTNMdfWA== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 04/11] mm/ksm: use folio in stable_node_dup Date: Wed, 20 Mar 2024 15:40:40 +0800 Message-ID: <20240320074049.4130552-5-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Save 2 compound_head calls. 
Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 9ea9b5ac44b4..f57817ef75bf 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1617,14 +1617,14 @@ bool is_page_sharing_candidate(struct ksm_stable_no= de *stable_node) return __is_page_sharing_candidate(stable_node, 0); } =20 -static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_= dup, - struct ksm_stable_node **_stable_node, - struct rb_root *root, - bool prune_stale_stable_nodes) +static void *stable_node_dup(struct ksm_stable_node **_stable_node_dup, + struct ksm_stable_node **_stable_node, + struct rb_root *root, + bool prune_stale_stable_nodes) { struct ksm_stable_node *dup, *found =3D NULL, *stable_node =3D *_stable_n= ode; struct hlist_node *hlist_safe; - struct page *_tree_page, *tree_page =3D NULL; + struct folio *folio, *tree_folio =3D NULL; int nr =3D 0; int found_rmap_hlist_len; =20 @@ -1649,18 +1649,18 @@ static struct page *stable_node_dup(struct ksm_stab= le_node **_stable_node_dup, * stable_node parameter itself will be freed from * under us if it returns NULL. */ - _tree_page =3D get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); - if (!_tree_page) + folio =3D get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); + if (!folio) continue; nr +=3D 1; if (is_page_sharing_candidate(dup)) { if (!found || dup->rmap_hlist_len > found_rmap_hlist_len) { if (found) - put_page(tree_page); + folio_put(tree_folio); found =3D dup; found_rmap_hlist_len =3D found->rmap_hlist_len; - tree_page =3D _tree_page; + tree_folio =3D folio; =20 /* skip put_page for found dup */ if (!prune_stale_stable_nodes) @@ -1668,7 +1668,7 @@ static struct page *stable_node_dup(struct ksm_stable= _node **_stable_node_dup, continue; } } - put_page(_tree_page); + folio_put(folio); } =20 if (found) { @@ -1733,7 +1733,7 @@ static struct page *stable_node_dup(struct ksm_stable= _node **_stable_node_dup, } =20 *_stable_node_dup =3D found; - return tree_page; + return tree_folio; } =20 static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node = *stable_node, --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id ED2A838F9A for ; Wed, 20 Mar 2024 07:37:52 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920273; cv=none; b=APqxT1A0LaSOZPiCzqIWxcOOpQ/EHl7INMtuMjwfLnkZ49c8bg/iuifbEdsojUsPz46c5wL7kPdS3/ub98fcjlNAjam0m0reGR6KbHAlJOmAexf5UmEoIP/MX6C6redn7B9Hrqm4y83Eqm4XnOK5V7rzdpsnE9l9W9Tn4aSpVuQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920273; c=relaxed/simple; bh=4seeVX6AVPqZd4ygjF/loS0RJCIz9ENpBZHyDB1ONgY=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=CWuKf8GHyaRBiemWrulrYAqTMFq/Fiw9tZ1NqiHz8oPr4UAVMXosdBhGyiXwKtyrr4YxLoENMnJTJadClxI87tA6bWM1Z7uXD5dwJK/UNiown3vheVGcJ/qkozKirldMm2oAQ38bKgcrbYQtOslTLLnRnUgCYg2Ldpjldst1wy4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=gcdYDTwN; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: 
smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="gcdYDTwN" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 783FCC433C7; Wed, 20 Mar 2024 07:37:50 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920272; bh=4seeVX6AVPqZd4ygjF/loS0RJCIz9ENpBZHyDB1ONgY=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=gcdYDTwNpPfOraYNtauQXzItCR4li8hYgUl6TNOp70HPLvjFG2OMzp63eiaHDkVJ4 lAQiNnL/FmDu2tGeykqradjGyyPa/riMOWFVA50dZoE5tjUOTyTWCivB0NUf4ZrLoA fXk6NuuqeTNyJVVuuLSoY8a7zoorFJ5HM06+E122HCv7C62/d67TiJISYDBBqXDASw MyCoEdgzJzMC+EVEbqI/DyO6j+yzlTpQeGAoyL3VrfpBmRCXr9VJmd7L4M3Yb+ZcXW NTgWZ7emrvaOYBFaEKJBAgDvaiqJPYAimCIhBECxGHQ31uddId4Ou16GQwAim0Vl+N vt2YiIJwwpK0w== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 05/11] mm/ksm: use a folio in scan_get_next_rmap_item func Date: Wed, 20 Mar 2024 15:40:41 +0800 Message-ID: <20240320074049.4130552-6-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Save a compound calls. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index f57817ef75bf..165a3e4162bf 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2597,14 +2597,14 @@ static struct ksm_rmap_item *scan_get_next_rmap_ite= m(struct page **page) */ if (!ksm_merge_across_nodes) { struct ksm_stable_node *stable_node, *next; - struct page *page; + struct folio *folio; =20 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { - page =3D get_ksm_page(stable_node, + folio =3D get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); - if (page) - put_page(page); + if (folio) + folio_put(folio); cond_resched(); } } --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 54E0F3A1DC for ; Wed, 20 Mar 2024 07:37:56 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920276; cv=none; b=O/hRgST52B/0OoWd5uFVoXYrVDI3KJxVEwURBiRYG/ioxc+lcoyffi4I4c5j2MfEH8auQ1HMlifVktSXxRbp0376MnPYP37+g3HQnxAfjvYxCivzC8jt/bfy0K06qebtndMCp1tfPx3hyj6ts0aH979YEr5VeXQvFPBy9Q3xXAY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920276; c=relaxed/simple; bh=19yPEARCpp8Ts03v7RxMxCBtzle77h5p1XW4PvUaw5E=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=OACJU04CAjFVsO7yIXmstS5oFX9wjeKPaop5ITfdGGWnk3DVMCVIcbtgZ8TqTwfWDozTE15/wIGD/v6qMQQJsJb/5VCCh7L9fzjm5IgM3wQNArC9VYtOffRz/TIRTbdKjwrF+beWa1Jx5BHA08XJHgYJIniZo51I8OW7Yfy1kDM= ARC-Authentication-Results: 
i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=kDzTtQlW; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="kDzTtQlW" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 778E8C433F1; Wed, 20 Mar 2024 07:37:53 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920276; bh=19yPEARCpp8Ts03v7RxMxCBtzle77h5p1XW4PvUaw5E=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=kDzTtQlW47rs4jpnYM++UgOMgMjc6JVlGxhM/IaBtklgkLeG6r4Tnxl1Xk+XlC8O/ KQHON/2ylQa9Yuyx0B8AKyEW5/TRrTxN4xBejiHEVqucOuTU7IwSba7JJ+kXWreOOF R4+4Zyn9/Nev/5bFVVQ4UkccMEbHgMbDM6uTF739A2wUe83+/7uDOexjTk7NkSdnO7 dcnb3ZAD1eZm7Vkx6yt8emCSnB7fcs+Nq9lIWGKaeEwRTnfw1B+r3bex6R9HSkNFbI s/2hdV/dzXTTLvLx7rtB70nOF7m9hfqY35R7h5eY35GCu8RyZf6Qj9ygfbX6Qednoz MHs21FcRRCPOQ== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 06/11] mm/ksm: use folio in write_protect_page Date: Wed, 20 Mar 2024 15:40:42 +0800 Message-ID: <20240320074049.4130552-7-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Compound page is checked and skipped before write_protect_page() called, use folio to save few compound_head checking and also remove duplicated compound checking again in the func. 
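(A note on the call site, as an illustration rather than part of this patch: the caller still works on a struct page, so the boundary uses a cast; since compound pages were already rejected via PageTransCompound() before this point, the page is its own folio and the cast is equivalent to the usual conversion helper:)

	/* the cast used in try_to_merge_one_page() below ... */
	if (write_protect_page(vma, (struct folio *)page, &orig_pte) == 0)

	/* ... is, for these order-0 pages, the same as writing: */
	if (write_protect_page(vma, page_folio(page), &orig_pte) == 0)

If the caller itself is later converted to take a folio, the cast can simply be dropped.
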
Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 165a3e4162bf..ad3a0294a2ec 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1275,23 +1275,21 @@ static u32 calc_checksum(struct page *page) return checksum; } =20 -static int write_protect_page(struct vm_area_struct *vma, struct page *pag= e, +static int write_protect_page(struct vm_area_struct *vma, struct folio *fo= lio, pte_t *orig_pte) { struct mm_struct *mm =3D vma->vm_mm; - DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); + DEFINE_PAGE_VMA_WALK(pvmw, &folio->page, vma, 0, 0); int swapped; int err =3D -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; pte_t entry; =20 - pvmw.address =3D page_address_in_vma(page, vma); + pvmw.address =3D page_address_in_vma(&folio->page, vma); if (pvmw.address =3D=3D -EFAULT) goto out; =20 - BUG_ON(PageTransCompound(page)); - mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, pvmw.address + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); @@ -1301,12 +1299,12 @@ static int write_protect_page(struct vm_area_struct= *vma, struct page *page, if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; =20 - anon_exclusive =3D PageAnonExclusive(page); + anon_exclusive =3D PageAnonExclusive(&folio->page); entry =3D ptep_get(pvmw.pte); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { - swapped =3D PageSwapCache(page); - flush_cache_page(vma, pvmw.address, page_to_pfn(page)); + swapped =3D folio_test_swapcache(folio); + flush_cache_page(vma, pvmw.address, folio_pfn(folio)); /* * Ok this is tricky, when get_user_pages_fast() run it doesn't * take any lock, therefore the check that we are going to make @@ -1326,20 +1324,20 @@ static int write_protect_page(struct vm_area_struct= *vma, struct page *page, * Check that no O_DIRECT or similar I/O is in progress on the * page */ - if (page_mapcount(page) + 1 + swapped !=3D page_count(page)) { + if (folio_mapcount(folio) + 1 + swapped !=3D folio_ref_count(folio)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } =20 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (anon_exclusive && - folio_try_share_anon_rmap_pte(page_folio(page), page)) { + folio_try_share_anon_rmap_pte(folio, &folio->page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } =20 if (pte_dirty(entry)) - set_page_dirty(page); + folio_mark_dirty(folio); entry =3D pte_mkclean(entry); =20 if (pte_write(entry)) @@ -1505,7 +1503,7 @@ static int try_to_merge_one_page(struct vm_area_struc= t *vma, * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. 
*/ - if (write_protect_page(vma, page, &orig_pte) =3D=3D 0) { + if (write_protect_page(vma, (struct folio *)page, &orig_pte) =3D=3D 0) { if (!kpage) { /* * While we hold page lock, upgrade page from --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 3DE893BB2A for ; Wed, 20 Mar 2024 07:37:59 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920279; cv=none; b=qH036doOWnetpeGeesjZUUUm9eLcF/iSapvaBsigszCXiuEH6Yw8ic+1+o1gNAOXpUt76MDFoUnAuXJ6tVyrngd3dFBEr+dwdS4Y36SMvq5WaNewH4Cr8MoT8o/mxofkmPc6ShqzUcoDSXGXT7EbKzTE5JAqEOdCArRDKIaSLGI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920279; c=relaxed/simple; bh=1ed6BHhGoxgGo7EGuh+4C4k7rhnha86RpFOxOMhqKzo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=GmxPaHkIgcuHANNqAG3pBmc8vjKlVxCZKMeGfnk99IAAaD4AWOYe9rQmeu8/db1EMH+tEVkDnIFNWDLt/cmWgVhYRbjed01T/fPFBUAc+OdXxZlZEFZP3ui06AlceOha9Lz/IGPp6qQyHyv2dcqOe5Ir1k8OR9js10AaulidPQ4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=lhCa2Ax5; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="lhCa2Ax5" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9CD8FC433A6; Wed, 20 Mar 2024 07:37:56 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920279; bh=1ed6BHhGoxgGo7EGuh+4C4k7rhnha86RpFOxOMhqKzo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=lhCa2Ax5ueUSpXQj0Ttzhxb1k/1JVJoGcXSZZKbOYPU0aS/96v+eP5ke/tSy7N662 6DPmG+505PRZFGCUnmZSCvPx3WlBVs1RRA0XcpitnGkrM4XpfG2+lYZchImhrASWGO EcWQvcNxlf6/hHiuh7IQY1B9Ic3LnWLnJ1jLpMPMW2/8EzOLrr2wPG/KRvzYgT9yHn CpugyVk7CXv2ky9jyU8gzOp79le3M7KmVcTXdeeeBhTuDBXOVcTN2YtpAL+FUBdK0T PdRNe9PQZwHr24y72ln6sr4/2fihO/p3mT3hoZdMOUfNrybuYUqoerKx9wGy2IoKy0 Y13qi1nGB8Gbw== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 07/11] mm/ksm: Convert chain series funcs to use folio Date: Wed, 20 Mar 2024 15:40:43 +0800 Message-ID: <20240320074049.4130552-8-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" In ksm stable tree all page are single, let's convert them to use folios. 
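(A brief note on the void * return type used here, as in the earlier get_ksm_page() and stable_node_dup() conversions; this is a reading of the series rather than anything stated in the patch: a void * converts implicitly to any object pointer in C, so page-based and folio-based callers both keep compiling while they are migrated one by one:)

	/* during the transition, both spellings of a caller are valid: */
	struct page *tree_page = chain(&stable_node_dup, stable_node, root);
	struct folio *tree_folio = chain(&stable_node_dup, stable_node, root);

Patches 10 and 11 at the end of the series restore the strong struct folio * return types once no page-based callers remain.
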
Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index ad3a0294a2ec..648fa695424b 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1761,7 +1761,7 @@ static struct ksm_stable_node *stable_node_dup_any(st= ruct ksm_stable_node *stabl * function and will be overwritten in all cases, the caller doesn't * need to initialize it. */ -static struct page *__stable_node_chain(struct ksm_stable_node **_stable_n= ode_dup, +static void *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, struct ksm_stable_node **_stable_node, struct rb_root *root, bool prune_stale_stable_nodes) @@ -1783,24 +1783,24 @@ static struct page *__stable_node_chain(struct ksm_= stable_node **_stable_node_du prune_stale_stable_nodes); } =20 -static __always_inline struct page *chain_prune(struct ksm_stable_node **s= _n_d, +static __always_inline void *chain_prune(struct ksm_stable_node **s_n_d, struct ksm_stable_node **s_n, struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, true); } =20 -static __always_inline struct page *chain(struct ksm_stable_node **s_n_d, +static __always_inline void *chain(struct ksm_stable_node **s_n_d, struct ksm_stable_node *s_n, struct rb_root *root) { struct ksm_stable_node *old_stable_node =3D s_n; - struct page *tree_page; + struct folio *tree_folio; =20 - tree_page =3D __stable_node_chain(s_n_d, &s_n, root, false); + tree_folio =3D __stable_node_chain(s_n_d, &s_n, root, false); /* not pruning dups so s_n cannot have changed */ VM_BUG_ON(s_n !=3D old_stable_node); - return tree_page; + return tree_folio; } =20 /* --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 79A573C46B for ; Wed, 20 Mar 2024 07:38:02 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920283; cv=none; b=LBP5AkoMxc7XCs2SfY7R2PVbZwfHCFl13LgBkDQJpM7Jeoo2nF5eM+TUjmoz9aCIh2dgnHfX0KxvDkHd8tGVfhmKnylk0YXCAh3CUgbV5o3XGIRSNKSu/xrXuX96+g0HXqKtNOMHl4QZk6RfaSFEJNtcfPUCJAG7mcdeHNMHQRQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920283; c=relaxed/simple; bh=rdqHFbrLbUfNMTwK9NhbP/z7rMDrfdCWv7WBbuSbpoI=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=QRx7rbeuXqT9Pmib+YkWU0MroWz/VM5wYZZB/5PjFcf3JrkphE6UH9AVBtiqFaEJU0Iu+Qlm64oLC5yT18hf4MWjqLs2WUCaxbAmTcwlCEQ7WAD0h+QvuXvjWgo/hXtDaK2vLOKHo9JkiJIFkchujWpXsfwb5NDiO3L1UwhKoOE= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=P7kGTdOt; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="P7kGTdOt" Received: by smtp.kernel.org (Postfix) with ESMTPSA id C4B16C433F1; Wed, 20 Mar 2024 07:37:59 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920282; bh=rdqHFbrLbUfNMTwK9NhbP/z7rMDrfdCWv7WBbuSbpoI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=P7kGTdOtJXwS+CqqQBmTtS71n3PQ18LbacGDQuh/lRh42lDg2iTEayH6tSB1h2bxN 
MrFoF+KbHSF2yqHtMdOX1qs+tosrFeLbpmemFHjLSVQmFpHa7XVRdgfCq44+BC2GQn hNlKEFkjh8KST5YownPLSNmyqo1XOoJhOYDcFNAsw+aE3GVFYVKTJ+/uLPlccUufPs X3wpNM1X8oqEebZhud/wtB3c2YdkoP9UUxDdszXnW+UBNIEJa9OhaeYN0j6V1EE/Nu zB9352SkNM7OKNTbg5EXBn8mtVsfJzr+hH2RNih4eLmYGeKuW/hUwDb+DbtupYZfg7 x9DhUNWe+gMfg== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 08/11] mm/ksm: Convert stable_tree_insert to use folio Date: Wed, 20 Mar 2024 15:40:44 +0800 Message-ID: <20240320074049.4130552-9-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" KSM stable tree only store single page, so convert the func users to use folio and save few compound_head calls. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 648fa695424b..71d1a52f344d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2062,7 +2062,7 @@ static struct page *stable_tree_search(struct page *p= age) * This function returns the stable tree node just allocated on success, * NULL otherwise. */ -static struct ksm_stable_node *stable_tree_insert(struct page *kpage) +static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) { int nid; unsigned long kpfn; @@ -2072,7 +2072,7 @@ static struct ksm_stable_node *stable_tree_insert(str= uct page *kpage) struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; bool need_chain =3D false; =20 - kpfn =3D page_to_pfn(kpage); + kpfn =3D folio_pfn(kfolio); nid =3D get_kpfn_nid(kpfn); root =3D root_stable_tree + nid; again: @@ -2080,13 +2080,13 @@ static struct ksm_stable_node *stable_tree_insert(s= truct page *kpage) new =3D &root->rb_node; =20 while (*new) { - struct page *tree_page; + struct folio *tree_folio; int ret; =20 cond_resched(); stable_node =3D rb_entry(*new, struct ksm_stable_node, node); stable_node_any =3D NULL; - tree_page =3D chain(&stable_node_dup, stable_node, root); + tree_folio =3D chain(&stable_node_dup, stable_node, root); if (!stable_node_dup) { /* * Either all stable_node dups were full in @@ -2108,11 +2108,11 @@ static struct ksm_stable_node *stable_tree_insert(s= truct page *kpage) * write protected at all times. Any will work * fine to continue the walk. 
*/ - tree_page =3D get_ksm_page(stable_node_any, + tree_folio =3D get_ksm_page(stable_node_any, GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); - if (!tree_page) { + if (!tree_folio) { /* * If we walked over a stale stable_node, * get_ksm_page() will call rb_erase() and it @@ -2125,8 +2125,8 @@ static struct ksm_stable_node *stable_tree_insert(str= uct page *kpage) goto again; } =20 - ret =3D memcmp_pages(kpage, tree_page); - put_page(tree_page); + ret =3D memcmp_pages(&kfolio->page, &tree_folio->page); + folio_put(tree_folio); =20 parent =3D *new; if (ret < 0) @@ -2145,7 +2145,7 @@ static struct ksm_stable_node *stable_tree_insert(str= uct page *kpage) =20 INIT_HLIST_HEAD(&stable_node_dup->hlist); stable_node_dup->kpfn =3D kpfn; - set_page_stable_node(kpage, stable_node_dup); + set_page_stable_node(&kfolio->page, stable_node_dup); stable_node_dup->rmap_hlist_len =3D 0; DO_NUMA(stable_node_dup->nid =3D nid); if (!need_chain) { @@ -2423,7 +2423,7 @@ static void cmp_and_merge_page(struct page *page, str= uct ksm_rmap_item *rmap_ite * node in the stable tree and add both rmap_items. */ lock_page(kpage); - stable_node =3D stable_tree_insert(kpage); + stable_node =3D stable_tree_insert(page_folio(kpage)); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node, false); --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id DF6803D0AD for ; Wed, 20 Mar 2024 07:38:05 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920286; cv=none; b=lc8SSXU7Dtysu5Jh+d21NADqCKz6BZSOAlYOxKhgBsLC7Pwi3Gu1ZORlowPiqzEgCZbWWLBTIrgKM3FfCggqjxF04doS0ATRMp3uSh0K9R/9cZf7+8f6tnKS85NBUleesYcL6wMNYF8Pnb7LhHUPe464u/DaKxcIWb6UCe+b5HA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920286; c=relaxed/simple; bh=tjVPu2LfV6/oJL+/cGFCPeE7wstp4Ud62/5nB+tu7Fo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=ujnrxEeS0vTA/UMIe7QyHa6/vZGnN6NoCYHft7B26AtBiIOakwfKFTQRsec1eVOQRUaXU581diac2FK2xk+GnytJC4NekC0aKO91Yk1Pw2z9YPJ1y0x9PEcn2JPG3jGYBUpkcdNLD2TdvC1V5SFDheMolRhJ7epILyz10QIZN8s= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=QnyMHcy7; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="QnyMHcy7" Received: by smtp.kernel.org (Postfix) with ESMTPSA id F12D5C43390; Wed, 20 Mar 2024 07:38:02 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920285; bh=tjVPu2LfV6/oJL+/cGFCPeE7wstp4Ud62/5nB+tu7Fo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=QnyMHcy70fzp8mjnSBi4o1+7TO69JWuiRoHjNyhFVoYTtNUcOygYYs1aYzSwHZ4tP B1BEQoxvGpCI5WgyIKpDPphqywUn0C0GwKZ9osVoCfI7FwSszybl7JM+X7kspzNj1U ZaUyDOXUqJMrYPlAP3tjgfC0glaUofbiUSDuv3ZHGJJXB3urIxTdY8dqSOJpLF7Q4J +0xpRLKV7XWf7OvDi6wMx13J5ewLV5vUggPB+1o1apGUrBvZIVUI7PM1LA3+YdCGy4 16PM0s7aJeQoqB4eV7jgHGwGxedDMFcl/prNrGkefrYvjHT9OTNC9gi84ELn/jrimA l05LCkNlt1caA== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew 
Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 09/11] mm/ksm: Convert stable_tree_search to use folio Date: Wed, 20 Mar 2024 15:40:45 +0800 Message-ID: <20240320074049.4130552-10-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Although, the func may pass a tail page to check its contents, but only single page exist in KSM stable tree, so we still can use folio in stable_tree_search() to save a few compound_head calls. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 60 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 71d1a52f344d..75401b3bae5c 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1812,7 +1812,7 @@ static __always_inline void *chain(struct ksm_stable_= node **s_n_d, * This function returns the stable tree node of identical content if foun= d, * NULL otherwise. */ -static struct page *stable_tree_search(struct page *page) +static void *stable_tree_search(struct page *page) { int nid; struct rb_root *root; @@ -1820,28 +1820,30 @@ static struct page *stable_tree_search(struct page = *page) struct rb_node *parent; struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; struct ksm_stable_node *page_node; + struct folio *folio; =20 - page_node =3D page_stable_node(page); + folio =3D (struct folio *)page; + page_node =3D folio_stable_node(folio); if (page_node && page_node->head !=3D &migrate_nodes) { /* ksm page forked */ - get_page(page); - return page; + folio_get(folio); + return folio; } =20 - nid =3D get_kpfn_nid(page_to_pfn(page)); + nid =3D get_kpfn_nid(folio_pfn(folio)); root =3D root_stable_tree + nid; again: new =3D &root->rb_node; parent =3D NULL; =20 while (*new) { - struct page *tree_page; + struct folio *tree_folio; int ret; =20 cond_resched(); stable_node =3D rb_entry(*new, struct ksm_stable_node, node); stable_node_any =3D NULL; - tree_page =3D chain_prune(&stable_node_dup, &stable_node, root); + tree_folio =3D chain_prune(&stable_node_dup, &stable_node, root); /* * NOTE: stable_node may have been freed by * chain_prune() if the returned stable_node_dup is @@ -1875,11 +1877,11 @@ static struct page *stable_tree_search(struct page = *page) * write protected at all times. Any will work * fine to continue the walk. 
*/ - tree_page =3D get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); + tree_folio =3D get_ksm_page(stable_node_any, + GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); - if (!tree_page) { + if (!tree_folio) { /* * If we walked over a stale stable_node, * get_ksm_page() will call rb_erase() and it @@ -1892,8 +1894,8 @@ static struct page *stable_tree_search(struct page *p= age) goto again; } =20 - ret =3D memcmp_pages(page, tree_page); - put_page(tree_page); + ret =3D memcmp_pages(&folio->page, &tree_folio->page); + folio_put(tree_folio); =20 parent =3D *new; if (ret < 0) @@ -1936,26 +1938,26 @@ static struct page *stable_tree_search(struct page = *page) * It would be more elegant to return stable_node * than kpage, but that involves more changes. */ - tree_page =3D get_ksm_page(stable_node_dup, - GET_KSM_PAGE_TRYLOCK); + tree_folio =3D get_ksm_page(stable_node_dup, + GET_KSM_PAGE_TRYLOCK); =20 - if (PTR_ERR(tree_page) =3D=3D -EBUSY) + if (PTR_ERR(tree_folio) =3D=3D -EBUSY) return ERR_PTR(-EBUSY); =20 - if (unlikely(!tree_page)) + if (unlikely(!tree_folio)) /* * The tree may have been rebalanced, * so re-evaluate parent and new. */ goto again; - unlock_page(tree_page); + folio_unlock(tree_folio); =20 if (get_kpfn_nid(stable_node_dup->kpfn) !=3D NUMA(stable_node_dup->nid)) { - put_page(tree_page); + folio_put(tree_folio); goto replace; } - return tree_page; + return tree_folio; } } =20 @@ -1968,8 +1970,8 @@ static struct page *stable_tree_search(struct page *p= age) rb_insert_color(&page_node->node, root); out: if (is_page_sharing_candidate(page_node)) { - get_page(page); - return page; + folio_get(folio); + return folio; } else return NULL; =20 @@ -1994,12 +1996,12 @@ static struct page *stable_tree_search(struct page = *page) &page_node->node, root); if (is_page_sharing_candidate(page_node)) - get_page(page); + folio_get(folio); else - page =3D NULL; + folio =3D NULL; } else { rb_erase(&stable_node_dup->node, root); - page =3D NULL; + folio =3D NULL; } } else { VM_BUG_ON(!is_stable_node_chain(stable_node)); @@ -2010,16 +2012,16 @@ static struct page *stable_tree_search(struct page = *page) DO_NUMA(page_node->nid =3D nid); stable_node_chain_add_dup(page_node, stable_node); if (is_page_sharing_candidate(page_node)) - get_page(page); + folio_get(folio); else - page =3D NULL; + folio =3D NULL; } else { - page =3D NULL; + folio =3D NULL; } } stable_node_dup->head =3D &migrate_nodes; list_add(&stable_node_dup->list, stable_node_dup->head); - return page; + return folio; =20 chain_append: /* stable_node_dup could be null if it reached the limit */ @@ -2109,7 +2111,7 @@ static struct ksm_stable_node *stable_tree_insert(str= uct folio *kfolio) * fine to continue the walk. 
*/ tree_folio =3D get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); + GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); if (!tree_folio) { --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 199CA3DB8C for ; Wed, 20 Mar 2024 07:38:08 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920289; cv=none; b=LvuSVChR1SIGkvWRGy43sbSffm4TQhlnF8i5Bc8yt6YybCkn5uCfmwNUCCiQht0dJAp4PTiSA5cRW7QyYm0WLrav5nLufOG7WHXlz8OnonLZfGhun+vyX/grAQdEa7h0nkRLJ+s0VSrr6/zUeB7qzZsPXPyOEBf+7XYoGUCNxGU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920289; c=relaxed/simple; bh=4/ipLVxA+I+/npLz6EvsQqobqTxElBqybjRD2uQTgUM=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=nUvSPtBi0f5eF6cUiIaHqO1z71vWylbBuhs/EjH5lJgOx82hozAxMe/F23uFZE6er0WQQZhT6CnjOCfvHWbaiGU9Bo01ick/P8uT1GkVqfdAqnnr5hXizZRv0QBVDaOyhr3Kwznmqao5E3Wt7fnbxDtD6/DfIKHyMIIG5k+ec40= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=NAz1Jtn5; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="NAz1Jtn5" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3178CC433F1; Wed, 20 Mar 2024 07:38:05 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920288; bh=4/ipLVxA+I+/npLz6EvsQqobqTxElBqybjRD2uQTgUM=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=NAz1Jtn5Tv7xWex39egXoEsKh+6Jyz4na6sqHGHE5Gn7vpfr/BD5NGADxf131MnF8 nS679xCkTyyQY1M8WtgJdNq8mqaUixp8HZrftKU44Ld/m7tOJd5wILQYaeRALdGZav wS5kjN/WFPEojTEskJziqJTsLeXlW+eoz/PRYZVDNqgGvpd/i/xAsOTqtjaZ9HN5Wl e9MSXRQFkU1funeUToZImXIzxFfPPfX05lRAjuCpOe/pKbeh3Vldew9uu+mJRTZuYg XcIxbGSEnfSyVXgguUKGdo2qx73PFtZy6OF36ZSemWtHs+H/mFl9yoO47rNR5HxZDP teWlc/3bOCa/g== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 10/11] mm/ksm: rename get_ksm_page to get_ksm_folio and return type Date: Wed, 20 Mar 2024 15:40:46 +0800 Message-ID: <20240320074049.4130552-11-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Now since all caller are changed to folio, return to folio and rename it as get_ksm_folio. 
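(For reference, the resulting interface and a typical caller after this patch, condensed from the diff below:)

	static struct folio *get_ksm_folio(struct ksm_stable_node *stable_node,
					   enum get_ksm_folio_flags flags);

	folio = get_ksm_folio(stable_node, GET_KSM_PAGE_LOCK);
	if (folio) {
		/* folio holds a reference and, with this flag, the folio lock */
		...
		folio_unlock(folio);
		folio_put(folio);
	}

Note that only the function and its flags enum are renamed; the GET_KSM_PAGE_* enumerator names themselves are left untouched by this patch.
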
Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 75401b3bae5c..806ad4d2693b 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -890,14 +890,14 @@ static void remove_node_from_stable_tree(struct ksm_s= table_node *stable_node) free_stable_node(stable_node); } =20 -enum get_ksm_page_flags { +enum get_ksm_folio_flags { GET_KSM_PAGE_NOLOCK, GET_KSM_PAGE_LOCK, GET_KSM_PAGE_TRYLOCK }; =20 /* - * get_ksm_page: checks if the page indicated by the stable node + * get_ksm_folio: checks if the page indicated by the stable node * is still its ksm page, despite having held no reference to it. * In which case we can trust the content of the page, and it * returns the gotten page; but if the page has now been zapped, @@ -915,8 +915,8 @@ enum get_ksm_page_flags { * a page to put something that might look like our key in page->mapping. * is on its way to being freed; but it is an anomaly to bear in mind. */ -static void *get_ksm_page(struct ksm_stable_node *stable_node, - enum get_ksm_page_flags flags) +static struct folio *get_ksm_folio(struct ksm_stable_node *stable_node, + enum get_ksm_folio_flags flags) { struct folio *folio; void *expected_mapping; @@ -1001,7 +1001,7 @@ static void remove_rmap_item_from_tree(struct ksm_rma= p_item *rmap_item) struct folio *folio; =20 stable_node =3D rmap_item->head; - folio =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); + folio =3D get_ksm_folio(stable_node, GET_KSM_PAGE_LOCK); if (!folio) goto out; =20 @@ -1110,10 +1110,10 @@ static int remove_stable_node(struct ksm_stable_nod= e *stable_node) struct folio *folio; int err; =20 - folio =3D get_ksm_page(stable_node, GET_KSM_PAGE_LOCK); + folio =3D get_ksm_folio(stable_node, GET_KSM_PAGE_LOCK); if (!folio) { /* - * get_ksm_page did remove_node_from_stable_tree itself. + * get_ksm_folio did remove_node_from_stable_tree itself. */ return 0; } @@ -1126,7 +1126,7 @@ static int remove_stable_node(struct ksm_stable_node = *stable_node) err =3D -EBUSY; if (!folio_mapped(folio)) { /* - * The stable node did not yet appear stale to get_ksm_page(), + * The stable node did not yet appear stale to get_ksm_folio(), * since that allows for an unmapped ksm folio to be recognized * right up until it is freed; but the node is safe to remove. * This folio might be in an LRU cache waiting to be freed, @@ -1641,13 +1641,13 @@ static void *stable_node_dup(struct ksm_stable_node= **_stable_node_dup, * We must walk all stable_node_dup to prune the stale * stable nodes during lookup. * - * get_ksm_page can drop the nodes from the + * get_ksm_folio can drop the nodes from the * stable_node->hlist if they point to freed pages * (that's why we do a _safe walk). The "dup" * stable_node parameter itself will be freed from * under us if it returns NULL. */ - folio =3D get_ksm_page(dup, GET_KSM_PAGE_NOLOCK); + folio =3D get_ksm_folio(dup, GET_KSM_PAGE_NOLOCK); if (!folio) continue; nr +=3D 1; @@ -1748,7 +1748,7 @@ static struct ksm_stable_node *stable_node_dup_any(st= ruct ksm_stable_node *stabl } =20 /* - * Like for get_ksm_page, this function can free the *_stable_node and + * Like for get_ksm_folio, this function can free the *_stable_node and * *_stable_node_dup if the returned tree_page is NULL. 
* * It can also free and overwrite *_stable_node with the found @@ -1770,7 +1770,7 @@ static void *__stable_node_chain(struct ksm_stable_no= de **_stable_node_dup, if (!is_stable_node_chain(stable_node)) { if (is_page_sharing_candidate(stable_node)) { *_stable_node_dup =3D stable_node; - return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK); + return get_ksm_folio(stable_node, GET_KSM_PAGE_NOLOCK); } /* * _stable_node_dup set to NULL means the stable_node @@ -1877,14 +1877,14 @@ static void *stable_tree_search(struct page *page) * write protected at all times. Any will work * fine to continue the walk. */ - tree_folio =3D get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); + tree_folio =3D get_ksm_folio(stable_node_any, + GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); if (!tree_folio) { /* * If we walked over a stale stable_node, - * get_ksm_page() will call rb_erase() and it + * get_ksm_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate @@ -1938,8 +1938,8 @@ static void *stable_tree_search(struct page *page) * It would be more elegant to return stable_node * than kpage, but that involves more changes. */ - tree_folio =3D get_ksm_page(stable_node_dup, - GET_KSM_PAGE_TRYLOCK); + tree_folio =3D get_ksm_folio(stable_node_dup, + GET_KSM_PAGE_TRYLOCK); =20 if (PTR_ERR(tree_folio) =3D=3D -EBUSY) return ERR_PTR(-EBUSY); @@ -2110,14 +2110,14 @@ static struct ksm_stable_node *stable_tree_insert(s= truct folio *kfolio) * write protected at all times. Any will work * fine to continue the walk. */ - tree_folio =3D get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); + tree_folio =3D get_ksm_folio(stable_node_any, + GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); if (!tree_folio) { /* * If we walked over a stale stable_node, - * get_ksm_page() will call rb_erase() and it + * get_ksm_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate @@ -2601,8 +2601,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(= struct page **page) =20 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { - folio =3D get_ksm_page(stable_node, - GET_KSM_PAGE_NOLOCK); + folio =3D get_ksm_folio(stable_node, + GET_KSM_PAGE_NOLOCK); if (folio) folio_put(folio); cond_resched(); @@ -3229,7 +3229,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct= folio *folio) /* * newfolio->mapping was set in advance; now we need smp_wmb() * to make sure that the new stable_node->kpfn is visible - * to get_ksm_page() before it can see that folio->mapping + * to get_ksm_folio() before it can see that folio->mapping * has gone stale (or that folio_test_swapcache has been cleared). 
*/ smp_wmb(); @@ -3256,7 +3256,7 @@ static bool stable_node_dup_remove_range(struct ksm_s= table_node *stable_node, if (stable_node->kpfn >=3D start_pfn && stable_node->kpfn < end_pfn) { /* - * Don't get_ksm_page, page has already gone: + * Don't get_ksm_folio, page has already gone: * which is why we keep kpfn instead of page* */ remove_node_from_stable_tree(stable_node); @@ -3344,7 +3344,7 @@ static int ksm_memory_callback(struct notifier_block = *self, * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree, - * otherwise get_ksm_page() might later try to access a + * otherwise get_ksm_folio() might later try to access a * non-existent struct page. */ ksm_check_stable_tree(mn->start_pfn, --=20 2.43.0 From nobody Mon Feb 9 04:28:56 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C38B53F9F8 for ; Wed, 20 Mar 2024 07:38:11 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920291; cv=none; b=leuMkqiQ626VIvLrh7LnB2Wy+pdNx4oLAkLuqodGIQwUU/K0E3n05gkz8ANVbU1xKkiW93+RyaFeg91PSU5uLFtjZvq85e8HXHDRTvJXN/m0ylE13TDGbG2mqof60DnrldDy6VYkuWQ9IAVzlHU5kXSm4iNCXmwDCM3bIzQvZjk= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1710920291; c=relaxed/simple; bh=2eCzaBD/FPlVUCW+3c3mpz0T6iG1rGJcXt6EJeYetoo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=Z1PbvLfQ+9ZZP+F75HJFkcErbYnjeVynlmAzasJr3KAPG4LawavGrMADQsk/NAXPGnqWJiHXEuEkUEfj1DDW7vnvCHGDOsc5SFeE2KFP+vzRvXqxj1IaDAhSW7yDPw/3QKfm2P/omkLgPUI1eF1MpEstqkErTl4fgbNRIoWgx8E= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=JJwpJSdF; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="JJwpJSdF" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 57DADC43399; Wed, 20 Mar 2024 07:38:09 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1710920291; bh=2eCzaBD/FPlVUCW+3c3mpz0T6iG1rGJcXt6EJeYetoo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=JJwpJSdF8PMQwKdjVsH6aJlrbH9++/7WiZkEWLeurmSr+JfpMJKrW+x4hDWfZCJfn dnRIC5q3bejKL1Rdjecw3lZ9mQfSrOecTOhXOsUmFmCHmT3nzZdOxZglTE6xGXFO7c jXeNkxlgkQZ5SMTxR8JJmBr2KNActnCXSpXBACqkNiERLUIZslmxWTpMsWwLpIjOeQ NXmB9l0gy1Jr+wUf/lw+AMatfNFMp0xfp0CdxVBc+c1jcuJS66HpsTnovaxLpVG+/k CXUcwj2LgYj6pkUzNz7WZ7VHtMwZEUn3E7O+PeCIqK7vRhs2kjEoEK5Mn7mwWr/nLx nLX9yNep5BLJA== From: alexs@kernel.org To: Izik Eidus , Matthew Wilcox , Andrea Arcangeli , Hugh Dickins , Chris Wright , kasong@tencent.com, Andrew Morton , linux-mm@kvack.org (open list:MEMORY MANAGEMENT), linux-kernel@vger.kernel.org (open list) Cc: linux-kernel@vger.kernel.org, "Alex Shi (tencent)" Subject: [PATCH 11/11] mm/ksm: return folio for chain series funcs Date: Wed, 20 Mar 2024 15:40:47 +0800 Message-ID: <20240320074049.4130552-12-alexs@kernel.org> X-Mailer: git-send-email 2.43.0 In-Reply-To: <20240320074049.4130552-1-alexs@kernel.org> References: <20240320074049.4130552-1-alexs@kernel.org> Precedence: bulk X-Mailing-List: 
linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: "Alex Shi (tencent)" Since all caller changed to folios, change their return type to folio too. Signed-off-by: Alex Shi (tencent) Cc: Izik Eidus Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Chris Wright --- mm/ksm.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 806ad4d2693b..74cf6c028380 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1615,10 +1615,10 @@ bool is_page_sharing_candidate(struct ksm_stable_no= de *stable_node) return __is_page_sharing_candidate(stable_node, 0); } =20 -static void *stable_node_dup(struct ksm_stable_node **_stable_node_dup, - struct ksm_stable_node **_stable_node, - struct rb_root *root, - bool prune_stale_stable_nodes) +static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node= _dup, + struct ksm_stable_node **_stable_node, + struct rb_root *root, + bool prune_stale_stable_nodes) { struct ksm_stable_node *dup, *found =3D NULL, *stable_node =3D *_stable_n= ode; struct hlist_node *hlist_safe; @@ -1761,10 +1761,10 @@ static struct ksm_stable_node *stable_node_dup_any(= struct ksm_stable_node *stabl * function and will be overwritten in all cases, the caller doesn't * need to initialize it. */ -static void *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, - struct ksm_stable_node **_stable_node, - struct rb_root *root, - bool prune_stale_stable_nodes) +static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_= node_dup, + struct ksm_stable_node **_stable_node, + struct rb_root *root, + bool prune_stale_stable_nodes) { struct ksm_stable_node *stable_node =3D *_stable_node; if (!is_stable_node_chain(stable_node)) { @@ -1783,16 +1783,16 @@ static void *__stable_node_chain(struct ksm_stable_= node **_stable_node_dup, prune_stale_stable_nodes); } =20 -static __always_inline void *chain_prune(struct ksm_stable_node **s_n_d, - struct ksm_stable_node **s_n, - struct rb_root *root) +static __always_inline struct folio *chain_prune(struct ksm_stable_node **= s_n_d, + struct ksm_stable_node **s_n, + struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, true); } =20 -static __always_inline void *chain(struct ksm_stable_node **s_n_d, - struct ksm_stable_node *s_n, - struct rb_root *root) +static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, + struct ksm_stable_node *s_n, + struct rb_root *root) { struct ksm_stable_node *old_stable_node =3D s_n; struct folio *tree_folio; --=20 2.43.0