From: Kairui Song <kasong@tencent.com>
The current loop will calculate the scan number on each iteration. The
number of folios to scan is based on the LRU length, with some unclear
behaviors, eg, the scan number is only shifted by reclaim priority when
aging is not needed or when at the default priority, and it couples
the number calculation with aging and rotation.
Adjust, simplify it, and decouple aging and rotation. Just calculate the
scan number for once at the beginning of the reclaim, always respect the
reclaim priority, and make the aging and rotation more explicit.
This slightly changes how aging and offline memcg reclaim works:
Previously, aging was always skipped at DEF_PRIORITY even when
eviction was impossible. Now, aging is always triggered when it
is necessary to make progress. The old behavior may waste a reclaim
iteration only to escalate priority, potentially causing over-reclaim
of slab and breaking reclaim balance in multi-cgroup setups.
Similarly for offline memcgs. Previously, an offline memcg wouldn't be
aged unless it didn't have any evictable folios. Now, we might age
it if it has only 3 generations and the reclaim priority is less
than DEF_PRIORITY, which should be fine. On one hand, offline memcg
might still hold long-term folios, and in fact, a long-existing offline
memcg must be pinned by some long-term folios like shmem. These folios
might be used by other memcgs, so aging them as ordinary memcgs seems
correct. Besides, aging enables further reclaim of an offlined memcg,
which will certainly happen if we keep shrinking it. And offline
memcg might soon be no longer an issue with reparenting.
Overall, the memcg LRU rotation, as described in mmzone.h,
remains the same.
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 74 +++++++++++++++++++++++++++++++++----------------------------
1 file changed, 40 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 963362523782..93ffb3d98fed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4913,49 +4913,44 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
}
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
- int swappiness, unsigned long *nr_to_scan)
+ struct scan_control *sc, int swappiness)
{
DEFINE_MIN_SEQ(lruvec);
- *nr_to_scan = 0;
/* have to run aging, since eviction is not possible anymore */
if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
- *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
+ /* try to get away with not aging at the default priority */
+ if (sc->priority == DEF_PRIORITY)
+ return false;
+
/* better to run aging even though eviction is still possible */
return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
-/*
- * For future optimizations:
- * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
- * reclaim.
- */
-static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
+static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
+ struct mem_cgroup *memcg, int swappiness)
{
- bool need_aging;
- unsigned long nr_to_scan;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- DEFINE_MAX_SEQ(lruvec);
-
- if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
- return -1;
-
- need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
+ unsigned long evictable, nr_to_scan;
+ evictable = lruvec_evictable_size(lruvec, swappiness);
+ nr_to_scan = evictable;
/* try to scrape all its memory if this memcg was deleted */
- if (nr_to_scan && !mem_cgroup_online(memcg))
+ if (!mem_cgroup_online(memcg))
return nr_to_scan;
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
- /* try to get away with not aging at the default priority */
- if (!need_aging || sc->priority == DEF_PRIORITY)
- return nr_to_scan >> sc->priority;
+ /*
+ * Always respect scan priority, minimally target some folios
+ * to keep reclaim moving forwards.
+ */
+ nr_to_scan >>= sc->priority;
+ if (!nr_to_scan)
+ nr_to_scan = min(evictable, SWAP_CLUSTER_MAX);
- /* stop scanning this lruvec as it's low on cold folios */
- return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
+ return nr_to_scan;
}
static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
@@ -4985,31 +4980,43 @@ static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc)
return true;
}
+/*
+ * For future optimizations:
+ * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
+ * reclaim.
+ */
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
+ bool need_rotate = false;
long nr_batch, nr_to_scan;
- unsigned long scanned = 0;
int swappiness = get_swappiness(lruvec, sc);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
- while (true) {
+ nr_to_scan = get_nr_to_scan(lruvec, sc, memcg, swappiness);
+ while (nr_to_scan > 0) {
int delta;
+ DEFINE_MAX_SEQ(lruvec);
- nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
- if (nr_to_scan <= 0)
+ if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) {
+ need_rotate = true;
break;
+ }
+
+ if (should_run_aging(lruvec, max_seq, sc, swappiness)) {
+ if (try_to_inc_max_seq(lruvec, max_seq, swappiness, false))
+ need_rotate = true;
+ break;
+ }
nr_batch = min(nr_to_scan, MAX_LRU_BATCH);
delta = evict_folios(nr_batch, lruvec, sc, swappiness);
if (!delta)
break;
- scanned += delta;
- if (scanned >= nr_to_scan)
- break;
-
if (should_abort_scan(lruvec, sc))
break;
+ nr_to_scan -= delta;
cond_resched();
}
@@ -5035,8 +5042,7 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
}
- /* whether this lruvec should be rotated */
- return nr_to_scan < 0;
+ return need_rotate;
}
static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
--
2.53.0
Forwarding sashiko review:
https://sashiko.dev/#/patchset/20260403-mglru-reclaim-v3-0-a285efd6ff91%40tencent.com?part=4
> commit 89b3058e01113eb027062254e73446d398af330a
> Author: Kairui Song <kasong@tencent.com>
> mm/mglru: restructure the reclaim loop
> This commit restructures the MGLRU reclaim loop by calculating the scan
> number once at the beginning of reclaim and decoupling aging from rotation.
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 963362523782a..93ffb3d98fedf 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> [ ... ]
> > -static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> > +static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
> > + struct mem_cgroup *memcg, int swappiness)
> > {
> > - bool need_aging;
> > - unsigned long nr_to_scan;
> > - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > - DEFINE_MAX_SEQ(lruvec);
> > -
> > - if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> > - return -1;
> > -
> > - need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> > + unsigned long evictable, nr_to_scan;
> >
> > + evictable = lruvec_evictable_size(lruvec, swappiness);
> > + nr_to_scan = evictable;
> [ ... ]
> > nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
> >
> > - /* try to get away with not aging at the default priority */
> > - if (!need_aging || sc->priority == DEF_PRIORITY)
> > - return nr_to_scan >> sc->priority;
> > + /*
> > + * Always respect scan priority, minimally target some folios
> > + * to keep reclaim moving forwards.
> > + */
> > + nr_to_scan >>= sc->priority;
> > + if (!nr_to_scan)
> > + nr_to_scan = min(evictable, SWAP_CLUSTER_MAX);
> When falling back to a minimum scan size, does this bypass the memory.low
> protection applied earlier by apply_proportional_protection()?
> Since the fallback uses the original unprotected evictable value rather
> than the protected nr_to_scan calculated prior to the priority shift, it
> appears protected memcgs will evaluate to min(evictable, 32) = 32
> and be forcefully scanned, bypassing the protection.
> Could this also cause an inversion based on memcg size?
> For example, an unprotected memcg with 4,000 folios shifts to 0 at
> DEF_PRIORITY (4000 >> 12) and falls back to 32 folios, whereas a larger
> memcg with 4,096 folios shifts to 1 (4096 >> 12) and scans only 1 folio.
That's really trivial I think.
The low protection calculation already has a "max(scan, SWAP_CLUSTER_MAX)"
check. If it's a concern here, then the
nr_to_scan = min(evictable, SWAP_CLUSTER_MAX) check can be dropped. Actually,
in a previous version of this series there wasn't such a check, and there
was a concern that always applying the priority shift could lead to
inefficient reclaim of small cgroups. Having the min check here seems
safer and makes the reclaim loop a bit more like what it was.
In theory and in practice there isn't much difference, since only very
tiny cgroups are slightly affected, and in most cases the reclaimer
already starts by requesting SWAP_CLUSTER_MAX folios to be reclaimed
(nr_to_reclaim).
Maybe some chore cleanup can be done for a perfect accurate reclaim
budget starting from the reclaim start point. Not sure if that
would be a practical thing to do though.
> > - /* stop scanning this lruvec as it's low on cold folios */
> > - return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0;
> > + return nr_to_scan;
> > }
> [ ... ]
> > +static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
> > {
> > + bool need_rotate = false;
> > long nr_batch, nr_to_scan;
> > - unsigned long scanned = 0;
> > int swappiness = get_swappiness(lruvec, sc);
> > + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> >
> > - while (true) {
> > + nr_to_scan = get_nr_to_scan(lruvec, sc, memcg, swappiness);
> > + while (nr_to_scan > 0) {
> > int delta;
> > + DEFINE_MAX_SEQ(lruvec);
> >
> > - nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
> > - if (nr_to_scan <= 0)
> > + if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) {
> > + need_rotate = true;
> > break;
> > + }
> > +
> > + if (should_run_aging(lruvec, max_seq, sc, swappiness)) {
> > + if (try_to_inc_max_seq(lruvec, max_seq, swappiness, false))
> > + need_rotate = true;
> > + break;
> > + }
> If an lruvec has zero evictable folios in its oldest generation,
> get_nr_to_scan() will return 0, and this loop will be completely bypassed.
No, get_nr_to_scan() won't return zero if aging can help make this LRU
evictable. And if even aging won't help, there is no point in
reclaiming it.
> Because the memcg rotation check (mem_cgroup_below_min) and the MGLRU aging
> logic (should_run_aging) are now entirely inside this loop, will this leave
> an lruvec permanently stalled?
There won't be a stall. In the worst case, empty memcgs fill the whole
random bucket, and it just requires a few more iterations to find a
reclaimable memcg.
But on second thought, if get_nr_to_scan() returns 0 on the first try,
try_to_shrink_lruvec() should indeed just return true to rotate the
unevictable LRU to speed up the following reclaim. The old behavior is
a bit fuzzy about this and doesn't make the best choice either:
rotation is decided by whether an aging attempt succeeds, but an
unevictable LRU doesn't always trigger aging.
This part can be improved easily while at it; it should be a nice
micro-optimization. I would do that if a new version is sent.
© 2016 - 2026 Red Hat, Inc.