From: liuqiqi <liuqiqi@kylinos.cn>
In zone_reclaimable_pages(), if the counts of NR_ZONE_INACTIVE_FILE,
NR_ZONE_ACTIVE_FILE, NR_ZONE_INACTIVE_ANON and NR_ZONE_ACTIVE_ANON are
all zero, the function falls back to returning the number of free pages.

When should_reclaim_retry() then estimates the available memory, it adds
NR_FREE_PAGES on top of the value returned by zone_reclaimable_pages(),
so the free pages end up being counted twice:

static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
		     struct alloc_context *ac, int alloc_flags,
		     bool did_some_progress, int *no_progress_loops)
{
	...
		available = reclaimable = zone_reclaimable_pages(zone);
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
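
To make the double counting concrete, here is a small standalone sketch
(illustrative only: the helpers below are stand-ins for the kernel
functions and the page counts are made up):

	#include <stdio.h>

	/* hypothetical zone state: no LRU pages at all, 1000 free pages */
	static unsigned long nr_lru_pages  = 0;
	static unsigned long nr_free_pages = 1000;

	/* old behaviour: fall back to the free page count when the LRU is empty */
	static unsigned long zone_reclaimable_pages_old(void)
	{
		return nr_lru_pages ? nr_lru_pages : nr_free_pages;
	}

	/* behaviour after this patch: report only genuinely reclaimable pages */
	static unsigned long zone_reclaimable_pages_new(void)
	{
		return nr_lru_pages;
	}

	int main(void)
	{
		/* mirrors: available = reclaimable; available += NR_FREE_PAGES; */
		unsigned long old_available = zone_reclaimable_pages_old() + nr_free_pages;
		unsigned long new_available = zone_reclaimable_pages_new() + nr_free_pages;

		printf("old available = %lu (free pages counted twice)\n", old_available);
		printf("new available = %lu\n", new_available);
		return 0;
	}

With the fallback removed, should_reclaim_retry() no longer over-estimates
how much memory reclaim could make available.
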
Signed-off-by: liuqiqi <liuqiqi@kylinos.cn>
---
mm/vmscan.c | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 34410d24dc15..a9aaefdba7a2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -393,14 +393,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
 	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
 		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
 			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
-	/*
-	 * If there are no reclaimable file-backed or anonymous pages,
-	 * ensure zones with sufficient free pages are not skipped.
-	 * This prevents zones like DMA32 from being ignored in reclaim
-	 * scenarios where they can still help alleviate memory pressure.
-	 */
-	if (nr == 0)
-		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+
 	return nr;
 }
 
@@ -6417,7 +6410,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
 		return true;
 
 	for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
-		if (!zone_reclaimable_pages(zone))
+		if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
 			continue;
 
 		pfmemalloc_reserve += min_wmark_pages(zone);
--
2.25.1
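
For context, the check changed by the second hunk sits in the loop that
decides whether direct reclaimers get throttled. Roughly (a paraphrase of
allow_direct_reclaim() in mm/vmscan.c, shown with the pre-patch check; the
surrounding lines may differ slightly from the tree this patch is against):

	for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
		if (!zone_reclaimable_pages(zone))	/* pre-patch check */
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);
		free_pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	/* direct reclaim is throttled when free pages fall below half the reserve */
	wmark_ok = free_pages > pfmemalloc_reserve / 2;

A skipped zone contributes neither its min watermark nor its free pages to
this calculation, which is why the removed fallback was added in the first
place and why the patch moves the free-page test into the loop condition
instead.
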
On Tue, 12 Aug 2025 15:02:10 +0800 liuqiqi@kylinos.cn wrote:
> From: liuqiqi <liuqiqi@kylinos.cn>
>
> In the zone_reclaimable_pages() function, if the page counts for
> NR_ZONE_INACTIVE_FILE, NR_ZONE_ACTIVE_FILE, NR_ZONE_INACTIVE_ANON,
> and NR_ZONE_ACTIVE_ANON are all zero,
> the function returns the number of free pages as the result.
>
> In this case, when should_reclaim_retry() calculates reclaimable pages,
> it will inadvertently double-count the free pages in its accounting.
>
> static inline bool
> should_reclaim_retry(gfp_t gfp_mask, unsigned order,
> struct alloc_context *ac, int alloc_flags,
> bool did_some_progress, int *no_progress_loops)
> {
> ...
> available = reclaimable = zone_reclaimable_pages(zone);
> available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
>
> Signed-off-by: liuqiqi <liuqiqi@kylinos.cn>
> ---
> mm/vmscan.c | 11 ++---------
> 1 file changed, 2 insertions(+), 9 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 34410d24dc15..a9aaefdba7a2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -393,14 +393,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
> if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
> nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
> zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
> - /*
> - * If there are no reclaimable file-backed or anonymous pages,
> - * ensure zones with sufficient free pages are not skipped.
> - * This prevents zones like DMA32 from being ignored in reclaim
> - * scenarios where they can still help alleviate memory pressure.
> - */
> - if (nr == 0)
> - nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
> +
> return nr;
> }
>
> @@ -6417,7 +6410,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
> return true;
>
> for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
> - if (!zone_reclaimable_pages(zone))
> + if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
> continue;
>
It looks like the logic got inverted when this code moved around. Should this
be !zone_page_state_snapshot()?
-chris
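
Spelled out, the check Chris appears to be suggesting would read (a sketch
of the presumably intended condition, not a tested change):

	for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
		/* skip only zones with neither reclaimable nor free pages */
		if (!zone_reclaimable_pages(zone) &&
		    !zone_page_state_snapshot(zone, NR_FREE_PAGES))
			continue;

		pfmemalloc_reserve += min_wmark_pages(zone);

i.e. a zone is only ignored when it has nothing reclaimable and nothing
free, so zones that still hold free pages keep contributing to the
pfmemalloc watermark check.
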
On 2025/8/12 15:02, liuqiqi@kylinos.cn wrote:
> From: liuqiqi <liuqiqi@kylinos.cn>
>
> In the zone_reclaimable_pages() function, if the page counts for
> NR_ZONE_INACTIVE_FILE, NR_ZONE_ACTIVE_FILE, NR_ZONE_INACTIVE_ANON,
> and NR_ZONE_ACTIVE_ANON are all zero,
> the function returns the number of free pages as the result.
>
> In this case, when should_reclaim_retry() calculates reclaimable pages,
> it will inadvertently double-count the free pages in its accounting.
>
> static inline bool
> should_reclaim_retry(gfp_t gfp_mask, unsigned order,
> struct alloc_context *ac, int alloc_flags,
> bool did_some_progress, int *no_progress_loops)
> {
> ...
> available = reclaimable = zone_reclaimable_pages(zone);
> available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
A "fixes" should be added here.
Fixes: 6aaced5abd32 ("mm: vmscan: account for free pages to prevent infinite Loop in throttle_direct_reclaim()")
Reviewed-by: Ye Liu <liuye@kylinos.cn>
> Signed-off-by: liuqiqi <liuqiqi@kylinos.cn>
> ---
> mm/vmscan.c | 11 ++---------
> 1 file changed, 2 insertions(+), 9 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 34410d24dc15..a9aaefdba7a2 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -393,14 +393,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
> if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
> nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
> zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
> - /*
> - * If there are no reclaimable file-backed or anonymous pages,
> - * ensure zones with sufficient free pages are not skipped.
> - * This prevents zones like DMA32 from being ignored in reclaim
> - * scenarios where they can still help alleviate memory pressure.
> - */
> - if (nr == 0)
> - nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
> +
> return nr;
> }
>
> @@ -6417,7 +6410,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
> return true;
>
> for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
> - if (!zone_reclaimable_pages(zone))
> + if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
> continue;
>
> pfmemalloc_reserve += min_wmark_pages(zone);
--
Thanks,
Ye Liu
On Tue, 12 Aug 2025 15:02:10 +0800 liuqiqi@kylinos.cn wrote:
> From: liuqiqi <liuqiqi@kylinos.cn>
>
> In the zone_reclaimable_pages() function, if the page counts for
> NR_ZONE_INACTIVE_FILE, NR_ZONE_ACTIVE_FILE, NR_ZONE_INACTIVE_ANON,
> and NR_ZONE_ACTIVE_ANON are all zero,
> the function returns the number of free pages as the result.
>
> In this case, when should_reclaim_retry() calculates reclaimable pages,
> it will inadvertently double-count the free pages in its accounting.
>
> static inline bool
> should_reclaim_retry(gfp_t gfp_mask, unsigned order,
> struct alloc_context *ac, int alloc_flags,
> bool did_some_progress, int *no_progress_loops)
> {
> ...
> available = reclaimable = zone_reclaimable_pages(zone);
> available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
Thanks. Does this have any significant runtime effects?
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -393,14 +393,7 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
> if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
> nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
> zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
> - /*
> - * If there are no reclaimable file-backed or anonymous pages,
> - * ensure zones with sufficient free pages are not skipped.
> - * This prevents zones like DMA32 from being ignored in reclaim
> - * scenarios where they can still help alleviate memory pressure.
> - */
> - if (nr == 0)
> - nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
> +
> return nr;
> }
>
> @@ -6417,7 +6410,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
> return true;
>
> for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
> - if (!zone_reclaimable_pages(zone))
> + if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
> continue;
>
> pfmemalloc_reserve += min_wmark_pages(zone);