From: Kairui Song <kasong@tencent.com>
Merge commonly used code for counting evictable folios in a lruvec.
No behavior change.
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 42 +++++++++++++++++-------------------------
1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33287ba4a500..d7fc7f1fe06d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
}
-static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
{
int gen, type, zone;
- unsigned long total = 0;
- int swappiness = get_swappiness(lruvec, sc);
+ unsigned long seq, total = 0;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
DEFINE_MIN_SEQ(lruvec);
for_each_evictable_type(type, swappiness) {
- unsigned long seq;
-
for (seq = min_seq[type]; seq <= max_seq; seq++) {
gen = lru_gen_from_seq(seq);
-
for (zone = 0; zone < MAX_NR_ZONES; zone++)
total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
}
}
+ return total;
+}
+
+static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+{
+ unsigned long total;
+ int swappiness = get_swappiness(lruvec, sc);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+ total = lruvec_evictable_size(lruvec, swappiness);
+
/* whether the size is big enough to be helpful */
return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
}
@@ -4921,9 +4927,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
int swappiness, unsigned long *nr_to_scan)
{
- int gen, type, zone;
- unsigned long size = 0;
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
DEFINE_MIN_SEQ(lruvec);
*nr_to_scan = 0;
@@ -4931,18 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
return true;
- for_each_evictable_type(type, swappiness) {
- unsigned long seq;
-
- for (seq = min_seq[type]; seq <= max_seq; seq++) {
- gen = lru_gen_from_seq(seq);
-
- for (zone = 0; zone < MAX_NR_ZONES; zone++)
- size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
- }
- }
-
- *nr_to_scan = size;
+ *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
/* better to run aging even though eviction is still possible */
return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
}
@@ -4954,7 +4946,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
*/
static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
{
- bool success;
+ bool need_aging;
unsigned long nr_to_scan;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
DEFINE_MAX_SEQ(lruvec);
@@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
return -1;
- success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
+ need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
/* try to scrape all its memory if this memcg was deleted */
if (nr_to_scan && !mem_cgroup_online(memcg))
@@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
/* try to get away with not aging at the default priority */
- if (!success || sc->priority == DEF_PRIORITY)
+ if (!need_aging || sc->priority == DEF_PRIORITY)
return nr_to_scan >> sc->priority;
/* stop scanning this lruvec as it's low on cold folios */
--
2.53.0
On 3/18/26 3:08 AM, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Merge commonly used code for counting evictable folios in a lruvec.
>
> No behavior change.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---

With lruvec_evictable_size() changed to return unsigned long, LGTM.

Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
On 2026/3/18 3:08, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Merge commonly used code for counting evictable folios in a lruvec.
>
> No behavior change.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
> ---
> mm/vmscan.c | 42 +++++++++++++++++-------------------------
> 1 file changed, 17 insertions(+), 25 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 33287ba4a500..d7fc7f1fe06d 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
> sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
> }
>
> -static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> +static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
> {
> int gen, type, zone;
> - unsigned long total = 0;
> - int swappiness = get_swappiness(lruvec, sc);
> + unsigned long seq, total = 0;
> struct lru_gen_folio *lrugen = &lruvec->lrugen;
> - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> DEFINE_MIN_SEQ(lruvec);
>
> for_each_evictable_type(type, swappiness) {
> - unsigned long seq;
> -
> for (seq = min_seq[type]; seq <= max_seq; seq++) {
> gen = lru_gen_from_seq(seq);
> -
> for (zone = 0; zone < MAX_NR_ZONES; zone++)
> total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> }
> }
>
> + return total;
> +}
> +
> +static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> +{
> + unsigned long total;
> + int swappiness = get_swappiness(lruvec, sc);
> + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> +
> + total = lruvec_evictable_size(lruvec, swappiness);
> +
> /* whether the size is big enough to be helpful */
> return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
> }
> @@ -4921,9 +4927,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> int swappiness, unsigned long *nr_to_scan)
> {
> - int gen, type, zone;
> - unsigned long size = 0;
> - struct lru_gen_folio *lrugen = &lruvec->lrugen;
> DEFINE_MIN_SEQ(lruvec);
>
> *nr_to_scan = 0;
> @@ -4931,18 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
> return true;
>
> - for_each_evictable_type(type, swappiness) {
> - unsigned long seq;
> -
> - for (seq = min_seq[type]; seq <= max_seq; seq++) {
> - gen = lru_gen_from_seq(seq);
> -
> - for (zone = 0; zone < MAX_NR_ZONES; zone++)
> - size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> - }
> - }
> -
> - *nr_to_scan = size;
> + *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> /* better to run aging even though eviction is still possible */
> return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
> }
> @@ -4954,7 +4946,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> */
> static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> {
> - bool success;
> + bool need_aging;
I have suffered a lot because of this name. Thank you.
> unsigned long nr_to_scan;
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> @@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> return -1;
>
> - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
>
> /* try to scrape all its memory if this memcg was deleted */
> if (nr_to_scan && !mem_cgroup_online(memcg))
> @@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
>
> /* try to get away with not aging at the default priority */
> - if (!success || sc->priority == DEF_PRIORITY)
> + if (!need_aging || sc->priority == DEF_PRIORITY)
> return nr_to_scan >> sc->priority;
>
> /* stop scanning this lruvec as it's low on cold folios */
>
--
Best regards,
Ridong
For what it's worth, I applied the full series and ran it through some
basic functional testing, I didn't see any bugs or regressions from
that.
Unfortunately, the best signal would be actually deploying it under
some real serving workloads, but the latency for me to do that + get
results is like order(weeks) and I suspect you don't want to wait that
long. :)
This particular commit looks good besides one minor nitpick:
Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
On Wed, Mar 18, 2026 at 7:19 PM Chen Ridong <chenridong@huaweicloud.com> wrote:
>
>
>
> On 2026/3/18 3:08, Kairui Song via B4 Relay wrote:
> > From: Kairui Song <kasong@tencent.com>
> >
> > Merge commonly used code for counting evictable folios in a lruvec.
> >
> > No behavior change.
> >
> > Signed-off-by: Kairui Song <kasong@tencent.com>
>
> Reviewed-by: Chen Ridong <chenridong@huaweicloud.com>
>
> > ---
> > mm/vmscan.c | 42 +++++++++++++++++-------------------------
> > 1 file changed, 17 insertions(+), 25 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 33287ba4a500..d7fc7f1fe06d 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
> > sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
> > }
> >
> > -static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> > +static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
> > {
Since `total` is unsigned long, should this function likewise return
`unsigned long`? It seems ideal to avoid conversions unless there's a
good reason to do so.
> > int gen, type, zone;
> > - unsigned long total = 0;
> > - int swappiness = get_swappiness(lruvec, sc);
> > + unsigned long seq, total = 0;
> > struct lru_gen_folio *lrugen = &lruvec->lrugen;
> > - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > DEFINE_MAX_SEQ(lruvec);
> > DEFINE_MIN_SEQ(lruvec);
> >
> > for_each_evictable_type(type, swappiness) {
> > - unsigned long seq;
> > -
> > for (seq = min_seq[type]; seq <= max_seq; seq++) {
> > gen = lru_gen_from_seq(seq);
> > -
> > for (zone = 0; zone < MAX_NR_ZONES; zone++)
> > total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> > }
> > }
> >
> > + return total;
> > +}
> > +
> > +static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> > +{
> > + unsigned long total;
> > + int swappiness = get_swappiness(lruvec, sc);
> > + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > +
> > + total = lruvec_evictable_size(lruvec, swappiness);
> > +
> > /* whether the size is big enough to be helpful */
> > return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
> > }
> > @@ -4921,9 +4927,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> > static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > int swappiness, unsigned long *nr_to_scan)
> > {
> > - int gen, type, zone;
> > - unsigned long size = 0;
> > - struct lru_gen_folio *lrugen = &lruvec->lrugen;
> > DEFINE_MIN_SEQ(lruvec);
> >
> > *nr_to_scan = 0;
> > @@ -4931,18 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
> > return true;
> >
> > - for_each_evictable_type(type, swappiness) {
> > - unsigned long seq;
> > -
> > - for (seq = min_seq[type]; seq <= max_seq; seq++) {
> > - gen = lru_gen_from_seq(seq);
> > -
> > - for (zone = 0; zone < MAX_NR_ZONES; zone++)
> > - size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> > - }
> > - }
> > -
> > - *nr_to_scan = size;
> > + *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> > /* better to run aging even though eviction is still possible */
> > return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
> > }
> > @@ -4954,7 +4946,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> > */
> > static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> > {
> > - bool success;
> > + bool need_aging;
>
> I have suffered a lot because of this name. Thank you.
>
> > unsigned long nr_to_scan;
> > struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> > DEFINE_MAX_SEQ(lruvec);
> > @@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> > if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> > return -1;
> >
> > - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> > + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> >
> > /* try to scrape all its memory if this memcg was deleted */
> > if (nr_to_scan && !mem_cgroup_online(memcg))
> > @@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> > nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
> >
> > /* try to get away with not aging at the default priority */
> > - if (!success || sc->priority == DEF_PRIORITY)
> > + if (!need_aging || sc->priority == DEF_PRIORITY)
> > return nr_to_scan >> sc->priority;
> >
> > /* stop scanning this lruvec as it's low on cold folios */
> >
>
> --
> Best regards,
> Ridong
>
On Sat, Mar 21, 2026 at 3:53 AM Axel Rasmussen <axelrasmussen@google.com> wrote:
>
> For what it's worth, I applied the full series and ran it through some
> basic functional testing, I didn't see any bugs or regressions from
> that.
>
> Unfortunately, the best signal would be actually deploying it under
> some real serving workloads, but the latency for me to do that + get
> results is like order(weeks) and I suspect you don't want to wait that
> long. :)
>
> This particular commit looks good besides one minor nitpick:
>
> Reviewed-by: Axel Rasmussen <axelrasmussen@google.com>
Thanks! Usually I run the series through a roughly 2-day-long stress
test matrix, and similar code has been deployed to our production
fleet for months. I just didn't find enough time to push these upstream.
Glad to see we are making progress to improve MGLRU upstream.
> > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > index 33287ba4a500..d7fc7f1fe06d 100644
> > > --- a/mm/vmscan.c
> > > +++ b/mm/vmscan.c
> > > @@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
> > > sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
> > > }
> > >
> > > -static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> > > +static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
> > > {
>
> Since `total` is unsigned long, should this function likewise return
> `unsigned long`? It seems ideal to avoid conversions unless there's a
> good reason to do so.
Ah, nice catch — that would avoid an implicit cast. I think it has no
effect, but I will update it in V2.
On Wed, Mar 18, 2026 at 3:11 AM Kairui Song via B4 Relay
<devnull+kasong.tencent.com@kernel.org> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Merge commonly used code for counting evictable folios in a lruvec.
>
> No behavior change.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Barry Song <baohua@kernel.org>
[...]
> static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> {
> - bool success;
> + bool need_aging;
Nice! Many times, I’ve been in the process of submitting a patch
to rename this `success`, as its current name is completely
unreadable and unclear in meaning.
Another `success` also needs some cleanup.
I mean this one:
static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
{
bool success;
...
success = try_to_shrink_lruvec(lruvec, sc);
}
yet:
static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
...
/* whether this lruvec should be rotated */
return nr_to_scan < 0;
}
I really can't see the connection between "should be rotated"
and "success".
> unsigned long nr_to_scan;
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> @@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> return -1;
>
> - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
>
> /* try to scrape all its memory if this memcg was deleted */
> if (nr_to_scan && !mem_cgroup_online(memcg))
> @@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
>
> /* try to get away with not aging at the default priority */
> - if (!success || sc->priority == DEF_PRIORITY)
> + if (!need_aging || sc->priority == DEF_PRIORITY)
> return nr_to_scan >> sc->priority;
>
> /* stop scanning this lruvec as it's low on cold folios */
>
Thanks
Barry
On Wed, Mar 18, 2026 at 5:47 PM Barry Song <21cnbao@gmail.com> wrote:
>
> On Wed, Mar 18, 2026 at 3:11 AM Kairui Song via B4 Relay
> <devnull+kasong.tencent.com@kernel.org> wrote:
> >
> > From: Kairui Song <kasong@tencent.com>
> >
> > Merge commonly used code for counting evictable folios in a lruvec.
> >
> > No behavior change.
> >
> > Signed-off-by: Kairui Song <kasong@tencent.com>
>
> Reviewed-by: Barry Song <baohua@kernel.org>
>
Hi Barry,
Thanks for the review.
> [...]
> > static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> > {
> > - bool success;
> > + bool need_aging;
>
> Nice! Many times, I’ve been in the process of submitting a patch
> to rename this `success`, as its current name is completely
> unreadable and unclear in meaning.
>
> Another `success`also needs some cleanup.
> I mean this one:
Good suggestion. Perhaps I better split it into a standalone patch
with your suggested-by, will include such a patch in V2.
Hi Kairui,
On Tue, Mar 17, 2026 at 2:11 PM Kairui Song via B4 Relay
<devnull+kasong.tencent.com@kernel.org> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> Merge commonly used code for counting evictable folios in a lruvec.
>
> No behavior change.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/vmscan.c | 42 +++++++++++++++++-------------------------
> 1 file changed, 17 insertions(+), 25 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 33287ba4a500..d7fc7f1fe06d 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -4078,27 +4078,33 @@ static void set_initial_priority(struct pglist_data *pgdat, struct scan_control
> sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
> }
>
> -static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> +static long lruvec_evictable_size(struct lruvec *lruvec, int swappiness)
> {
> int gen, type, zone;
> - unsigned long total = 0;
> - int swappiness = get_swappiness(lruvec, sc);
> + unsigned long seq, total = 0;
> struct lru_gen_folio *lrugen = &lruvec->lrugen;
> - struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> DEFINE_MIN_SEQ(lruvec);
>
> for_each_evictable_type(type, swappiness) {
> - unsigned long seq;
> -
> for (seq = min_seq[type]; seq <= max_seq; seq++) {
> gen = lru_gen_from_seq(seq);
> -
> for (zone = 0; zone < MAX_NR_ZONES; zone++)
> total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> }
> }
>
> + return total;
> +}
> +
> +static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
> +{
> + unsigned long total;
> + int swappiness = get_swappiness(lruvec, sc);
> + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> +
> + total = lruvec_evictable_size(lruvec, swappiness);
> +
> /* whether the size is big enough to be helpful */
> return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
> }
> @@ -4921,9 +4927,6 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> int swappiness, unsigned long *nr_to_scan)
> {
> - int gen, type, zone;
> - unsigned long size = 0;
> - struct lru_gen_folio *lrugen = &lruvec->lrugen;
> DEFINE_MIN_SEQ(lruvec);
>
> *nr_to_scan = 0;
> @@ -4931,18 +4934,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq)
> return true;
>
> - for_each_evictable_type(type, swappiness) {
> - unsigned long seq;
> -
> - for (seq = min_seq[type]; seq <= max_seq; seq++) {
> - gen = lru_gen_from_seq(seq);
> -
> - for (zone = 0; zone < MAX_NR_ZONES; zone++)
> - size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
> - }
> - }
> -
> - *nr_to_scan = size;
> + *nr_to_scan = lruvec_evictable_size(lruvec, swappiness);
> /* better to run aging even though eviction is still possible */
> return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq;
> }
> @@ -4954,7 +4946,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
> */
> static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
> {
> - bool success;
> + bool need_aging;
> unsigned long nr_to_scan;
> struct mem_cgroup *memcg = lruvec_memcg(lruvec);
> DEFINE_MAX_SEQ(lruvec);
> @@ -4962,7 +4954,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg))
> return -1;
>
> - success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
> + need_aging = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan);
>
> /* try to scrape all its memory if this memcg was deleted */
> if (nr_to_scan && !mem_cgroup_online(memcg))
> @@ -4971,7 +4963,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int s
> nr_to_scan = apply_proportional_protection(memcg, sc, nr_to_scan);
>
> /* try to get away with not aging at the default priority */
> - if (!success || sc->priority == DEF_PRIORITY)
> + if (!need_aging || sc->priority == DEF_PRIORITY)
> return nr_to_scan >> sc->priority;
>
> /* stop scanning this lruvec as it's low on cold folios */
>
> --
> 2.53.0
>
>
Yep, the cleanup makes sense.
Acked-by: Yuanchu Xie <yuanchu@google.com>
© 2016 - 2026 Red Hat, Inc.