From: Zhang Peng <bruzzhang@tencent.com>
shrink_folio_list() contains a self-contained block that sets up
TTU flags and calls try_to_unmap(), accounting for failures via
reclaim_stat. Extract it into folio_try_unmap() to reduce the size
of shrink_folio_list() and make the unmap step independently readable.
No functional change.
Suggested-by: Kairui Song <kasong@tencent.com>
Signed-off-by: Zhang Peng <bruzzhang@tencent.com>
---
mm/vmscan.c | 70 +++++++++++++++++++++++++++++++++++--------------------------
1 file changed, 40 insertions(+), 30 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c8ff742ed891..63cc88c875e8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1217,6 +1217,44 @@ static void pageout_one(struct folio *folio, struct list_head *ret_folios,
folio_test_unevictable(folio), folio);
}
+static bool folio_try_unmap(struct folio *folio, struct reclaim_stat *stat,
+ unsigned int nr_pages)
+{
+ enum ttu_flags flags = TTU_BATCH_FLUSH;
+ bool was_swapbacked;
+
+ if (!folio_mapped(folio))
+ return true;
+
+ was_swapbacked = folio_test_swapbacked(folio);
+ if (folio_test_pmd_mappable(folio))
+ flags |= TTU_SPLIT_HUGE_PMD;
+ /*
+ * Without TTU_SYNC, try_to_unmap will only begin to
+ * hold PTL from the first present PTE within a large
+ * folio. Some initial PTEs might be skipped due to
+ * races with parallel PTE writes in which PTEs can be
+ * cleared temporarily before being written new present
+ * values. This will lead to a large folio is still
+ * mapped while some subpages have been partially
+ * unmapped after try_to_unmap; TTU_SYNC helps
+ * try_to_unmap acquire PTL from the first PTE,
+ * eliminating the influence of temporary PTE values.
+ */
+ if (folio_test_large(folio))
+ flags |= TTU_SYNC;
+
+ try_to_unmap(folio, flags);
+ if (folio_mapped(folio)) {
+ stat->nr_unmap_fail += nr_pages;
+ if (!was_swapbacked &&
+ folio_test_swapbacked(folio))
+ stat->nr_lazyfree_fail += nr_pages;
+ return false;
+ }
+ return true;
+}
+
/*
* Reclaimed folios are counted in stat->nr_reclaimed.
*/
@@ -1491,36 +1529,8 @@ static void shrink_folio_list(struct list_head *folio_list,
* The folio is mapped into the page tables of one or more
* processes. Try to unmap it here.
*/
- if (folio_mapped(folio)) {
- enum ttu_flags flags = TTU_BATCH_FLUSH;
- bool was_swapbacked = folio_test_swapbacked(folio);
-
- if (folio_test_pmd_mappable(folio))
- flags |= TTU_SPLIT_HUGE_PMD;
- /*
- * Without TTU_SYNC, try_to_unmap will only begin to
- * hold PTL from the first present PTE within a large
- * folio. Some initial PTEs might be skipped due to
- * races with parallel PTE writes in which PTEs can be
- * cleared temporarily before being written new present
- * values. This will lead to a large folio is still
- * mapped while some subpages have been partially
- * unmapped after try_to_unmap; TTU_SYNC helps
- * try_to_unmap acquire PTL from the first PTE,
- * eliminating the influence of temporary PTE values.
- */
- if (folio_test_large(folio))
- flags |= TTU_SYNC;
-
- try_to_unmap(folio, flags);
- if (folio_mapped(folio)) {
- stat->nr_unmap_fail += nr_pages;
- if (!was_swapbacked &&
- folio_test_swapbacked(folio))
- stat->nr_lazyfree_fail += nr_pages;
- goto activate_locked;
- }
- }
+ if (!folio_try_unmap(folio, stat, nr_pages))
+ goto activate_locked;
/*
* Folio is unmapped now so it cannot be newly pinned anymore.
--
2.43.7
On Fri, Apr 10, 2026 at 8:47 PM Zhang Peng <zippermonkey@icloud.com> wrote:
>
> From: Zhang Peng <bruzzhang@tencent.com>
>
> shrink_folio_list() contains a self-contained block that sets up
> TTU flags and calls try_to_unmap(), accounting for failures via
> reclaim_stat. Extract it into folio_try_unmap() to reduce the size
> of shrink_folio_list() and make the unmap step independently readable.
>
> No functional change.
>
> Suggested-by: Kairui Song <kasong@tencent.com>
> Signed-off-by: Zhang Peng <bruzzhang@tencent.com>
> ---
> mm/vmscan.c | 70 +++++++++++++++++++++++++++++++++++--------------------------
> 1 file changed, 40 insertions(+), 30 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index c8ff742ed891..63cc88c875e8 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1217,6 +1217,44 @@ static void pageout_one(struct folio *folio, struct list_head *ret_folios,
> folio_test_unevictable(folio), folio);
> }
>
> +static bool folio_try_unmap(struct folio *folio, struct reclaim_stat *stat,
> + unsigned int nr_pages)
> +{
> + enum ttu_flags flags = TTU_BATCH_FLUSH;
> + bool was_swapbacked;
> +
> + if (!folio_mapped(folio))
> + return true;
This is quite odd: when the folio is not mapped at all, the
function returns true as “success” without attempting any unmap.
I can’t really connect “nothing to unmap” with “successfully
unmapped.”
Can we move this check out to the caller?
In shrink_folio_list(), we could simply do:
if (folio_mapped(folio) &&
    !folio_try_unmap(folio, stat, nr_pages))
        goto activate_locked;
so that we only call folio_try_unmap() for folios that
are actually mapped.
> +
> + was_swapbacked = folio_test_swapbacked(folio);
> + if (folio_test_pmd_mappable(folio))
> + flags |= TTU_SPLIT_HUGE_PMD;
> + /*
> + * Without TTU_SYNC, try_to_unmap will only begin to
> + * hold PTL from the first present PTE within a large
> + * folio. Some initial PTEs might be skipped due to
> + * races with parallel PTE writes in which PTEs can be
> + * cleared temporarily before being written new present
> + * values. This will lead to a large folio is still
> + * mapped while some subpages have been partially
> + * unmapped after try_to_unmap; TTU_SYNC helps
> + * try_to_unmap acquire PTL from the first PTE,
> + * eliminating the influence of temporary PTE values.
> + */
> + if (folio_test_large(folio))
> + flags |= TTU_SYNC;
> +
> + try_to_unmap(folio, flags);
> + if (folio_mapped(folio)) {
> + stat->nr_unmap_fail += nr_pages;
> + if (!was_swapbacked &&
> + folio_test_swapbacked(folio))
> + stat->nr_lazyfree_fail += nr_pages;
> + return false;
> + }
> + return true;
> +}
> +
> /*
> * Reclaimed folios are counted in stat->nr_reclaimed.
> */
> @@ -1491,36 +1529,8 @@ static void shrink_folio_list(struct list_head *folio_list,
> * The folio is mapped into the page tables of one or more
> * processes. Try to unmap it here.
> */
> - if (folio_mapped(folio)) {
> - enum ttu_flags flags = TTU_BATCH_FLUSH;
> - bool was_swapbacked = folio_test_swapbacked(folio);
> -
> - if (folio_test_pmd_mappable(folio))
> - flags |= TTU_SPLIT_HUGE_PMD;
> - /*
> - * Without TTU_SYNC, try_to_unmap will only begin to
> - * hold PTL from the first present PTE within a large
> - * folio. Some initial PTEs might be skipped due to
> - * races with parallel PTE writes in which PTEs can be
> - * cleared temporarily before being written new present
> - * values. This will lead to a large folio is still
> - * mapped while some subpages have been partially
> - * unmapped after try_to_unmap; TTU_SYNC helps
> - * try_to_unmap acquire PTL from the first PTE,
> - * eliminating the influence of temporary PTE values.
> - */
> - if (folio_test_large(folio))
> - flags |= TTU_SYNC;
> -
> - try_to_unmap(folio, flags);
> - if (folio_mapped(folio)) {
> - stat->nr_unmap_fail += nr_pages;
> - if (!was_swapbacked &&
> - folio_test_swapbacked(folio))
> - stat->nr_lazyfree_fail += nr_pages;
> - goto activate_locked;
> - }
> - }
> + if (!folio_try_unmap(folio, stat, nr_pages))
> + goto activate_locked;
>
> /*
> * Folio is unmapped now so it cannot be newly pinned anymore.
>
> --
> 2.43.7
Thanks
Barry
© 2016 - 2026 Red Hat, Inc.