From: Kairui Song <kasong@tencent.com>
Currently MGLRU and non-MGLRU handle the reclaim statistic and
writeback handling very differently, especially throttling.
Basically MGLRU just ignored the throttling part.
Let's just unify this part, use a helper to deduplicate the code
so both setups will share the same behavior. Also remove the
folio_clear_reclaim in isolate_folio which was actively invalidating
the congestion control. PG_reclaim is now handled by shrink_folio_list,
keeping it in isolate_folio is not helpful.
Test using the following bash reproducer:
echo "Setup a slow device using dm delay"
dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
LOOP=$(losetup --show -f /var/tmp/backing)
mkfs.ext4 -q $LOOP
echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
dmsetup create slow_dev
mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
echo "Start writeback pressure"
sync && echo 3 > /proc/sys/vm/drop_caches
mkdir /sys/fs/cgroup/test_wb
echo 128M > /sys/fs/cgroup/test_wb/memory.max
(echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
echo "Clean up"
echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
dmsetup resume slow_dev
umount -l /mnt/slow && sync
dmsetup remove slow_dev
Before this commit, `dd` gets OOM killed immediately if
MGLRU is enabled. Classic LRU is fine.
After this commit, congestion control is now effective and there is
no more spinning on the LRU or premature OOM.
Stress tests on other workloads also look good.
Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
Signed-off-by: Kairui Song <kasong@tencent.com>
---
mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
1 file changed, 41 insertions(+), 52 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1783da54ada1..83c8fdf8fdc4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
return !(current->flags & PF_LOCAL_THROTTLE);
}
+static void handle_reclaim_writeback(unsigned long nr_taken,
+ struct pglist_data *pgdat,
+ struct scan_control *sc,
+ struct reclaim_stat *stat)
+{
+ /*
+ * If dirty folios are scanned that are not queued for IO, it
+ * implies that flushers are not doing their job. This can
+ * happen when memory pressure pushes dirty folios to the end of
+ * the LRU before the dirty limits are breached and the dirty
+ * data has expired. It can also happen when the proportion of
+ * dirty folios grows not through writes but through memory
+ * pressure reclaiming all the clean cache. And in some cases,
+ * the flushers simply cannot keep up with the allocation
+ * rate. Nudge the flusher threads in case they are asleep.
+ */
+ if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
+ /*
+ * For cgroupv1 dirty throttling is achieved by waking up
+ * the kernel flusher here and later waiting on folios
+ * which are in writeback to finish (see shrink_folio_list()).
+ *
+ * Flusher may not be able to issue writeback quickly
+ * enough for cgroupv1 writeback throttling to work
+ * on a large system.
+ */
+ if (!writeback_throttling_sane(sc))
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ }
+
+ sc->nr.dirty += stat->nr_dirty;
+ sc->nr.congested += stat->nr_congested;
+ sc->nr.writeback += stat->nr_writeback;
+ sc->nr.immediate += stat->nr_immediate;
+ sc->nr.taken += nr_taken;
+}
+
/*
* shrink_inactive_list() is a helper for shrink_node(). It returns the number
* of reclaimed pages
@@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
lruvec_lock_irq(lruvec);
lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
nr_scanned - nr_reclaimed);
-
- /*
- * If dirty folios are scanned that are not queued for IO, it
- * implies that flushers are not doing their job. This can
- * happen when memory pressure pushes dirty folios to the end of
- * the LRU before the dirty limits are breached and the dirty
- * data has expired. It can also happen when the proportion of
- * dirty folios grows not through writes but through memory
- * pressure reclaiming all the clean cache. And in some cases,
- * the flushers simply cannot keep up with the allocation
- * rate. Nudge the flusher threads in case they are asleep.
- */
- if (stat.nr_unqueued_dirty == nr_taken) {
- wakeup_flusher_threads(WB_REASON_VMSCAN);
- /*
- * For cgroupv1 dirty throttling is achieved by waking up
- * the kernel flusher here and later waiting on folios
- * which are in writeback to finish (see shrink_folio_list()).
- *
- * Flusher may not be able to issue writeback quickly
- * enough for cgroupv1 writeback throttling to work
- * on a large system.
- */
- if (!writeback_throttling_sane(sc))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
- }
-
- sc->nr.dirty += stat.nr_dirty;
- sc->nr.congested += stat.nr_congested;
- sc->nr.writeback += stat.nr_writeback;
- sc->nr.immediate += stat.nr_immediate;
- sc->nr.taken += nr_taken;
-
+ handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
nr_scanned, nr_reclaimed, &stat, sc->priority, file);
return nr_reclaimed;
@@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
if (!folio_test_referenced(folio))
set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
- /* for shrink_folio_list() */
- folio_clear_reclaim(folio);
-
success = lru_gen_del_folio(lruvec, folio, true);
VM_WARN_ON_ONCE_FOLIO(!success, folio);
@@ -4833,26 +4836,11 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
retry:
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
sc->nr_reclaimed += reclaimed;
+ handle_reclaim_writeback(isolated, pgdat, sc, &stat);
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
type_scanned, reclaimed, &stat, sc->priority,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
- /*
- * If too many file cache in the coldest generation can't be evicted
- * due to being dirty, wake up the flusher.
- */
- if (stat.nr_unqueued_dirty == isolated) {
- wakeup_flusher_threads(WB_REASON_VMSCAN);
-
- /*
- * For cgroupv1 dirty throttling is achieved by waking up
- * the kernel flusher here and later waiting on folios
- * which are in writeback to finish (see shrink_folio_list()).
- */
- if (!writeback_throttling_sane(sc))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
- }
-
list_for_each_entry_safe_reverse(folio, next, &list, lru) {
DEFINE_MIN_SEQ(lruvec);
@@ -4895,6 +4883,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
if (!list_empty(&list)) {
skip_retry = true;
+ isolated = 0;
goto retry;
}
--
2.53.0
On Sun, Mar 29, 2026 at 03:52:38AM +0800, Kairui Song via B4 Relay wrote: > From: Kairui Song <kasong@tencent.com> > > Currently MGLRU and non-MGLRU handle the reclaim statistic and > writeback handling very differently, especially throttling. > Basically MGLRU just ignored the throttling part. > > Let's just unify this part, use a helper to deduplicate the code > so both setups will share the same behavior. Also remove the > folio_clear_reclaim in isolate_folio which was actively invalidating > the congestion control. PG_reclaim is now handled by shrink_folio_list, > keeping it in isolate_folio is not helpful. > > Test using following reproducer using bash: > > echo "Setup a slow device using dm delay" > dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048 > LOOP=$(losetup --show -f /var/tmp/backing) > mkfs.ext4 -q $LOOP > echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \ > dmsetup create slow_dev > mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow > > echo "Start writeback pressure" > sync && echo 3 > /proc/sys/vm/drop_caches > mkdir /sys/fs/cgroup/test_wb > echo 128M > /sys/fs/cgroup/test_wb/memory.max > (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \ > dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192) > > echo "Clean up" > echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev > dmsetup resume slow_dev > umount -l /mnt/slow && sync > dmsetup remove slow_dev > > Before this commit, `dd` will get OOM killed immediately if > MGLRU is enabled. Classic LRU is fine. > > After this commit, congestion control is now effective and no more What do you mean by congestion control here? > spin on LRU or premature OOM. > > Stress test on other workloads also looking good. 
> > Suggested-by: Chen Ridong <chenridong@huaweicloud.com> > Signed-off-by: Kairui Song <kasong@tencent.com> There is still differences for global and kswapd reclaim in the shrink_node() like kswapd throttling and congestion state management and throttling. Any plan to unify them?
On Wed, Apr 01, 2026 at 07:39:03PM +0800, Shakeel Butt wrote: > On Sun, Mar 29, 2026 at 03:52:38AM +0800, Kairui Song via B4 Relay wrote: > > From: Kairui Song <kasong@tencent.com> > > > > Currently MGLRU and non-MGLRU handle the reclaim statistic and > > writeback handling very differently, especially throttling. > > Basically MGLRU just ignored the throttling part. > > > > Let's just unify this part, use a helper to deduplicate the code > > so both setups will share the same behavior. Also remove the > > folio_clear_reclaim in isolate_folio which was actively invalidating > > the congestion control. PG_reclaim is now handled by shrink_folio_list, > > keeping it in isolate_folio is not helpful. > > > > Test using following reproducer using bash: > > > > echo "Setup a slow device using dm delay" > > dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048 > > LOOP=$(losetup --show -f /var/tmp/backing) > > mkfs.ext4 -q $LOOP > > echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \ > > dmsetup create slow_dev > > mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow > > > > echo "Start writeback pressure" > > sync && echo 3 > /proc/sys/vm/drop_caches > > mkdir /sys/fs/cgroup/test_wb > > echo 128M > /sys/fs/cgroup/test_wb/memory.max > > (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \ > > dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192) > > > > echo "Clean up" > > echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev > > dmsetup resume slow_dev > > umount -l /mnt/slow && sync > > dmsetup remove slow_dev > > > > Before this commit, `dd` will get OOM killed immediately if > > MGLRU is enabled. Classic LRU is fine. > > > > After this commit, congestion control is now effective and no more > > What do you mean by congestion control here? This particular case demostrated here is VMSCAN_THROTTLE_CONGESTED so I described it as "congestion control", may I'll just say throttling to avoid confusion, it's not limited to that. 
> > > spin on LRU or premature OOM. > > > > Stress test on other workloads also looking good. > > > > Suggested-by: Chen Ridong <chenridong@huaweicloud.com> > > Signed-off-by: Kairui Song <kasong@tencent.com> > > There is still differences for global and kswapd reclaim in the shrink_node() > like kswapd throttling and congestion state management and throttling. Any plan > to unify them? Of course. Let fix it step by step, this series is pretty long already. I originally plan to put this patch in a later series, but as Ridong pointed out leaving these counter updated but unused looks really ugly. And this fix is clean and easily to understand I think.
On Thu, Apr 02, 2026 at 10:56:07AM +0800, Kairui Song wrote: > On Wed, Apr 01, 2026 at 07:39:03PM +0800, Shakeel Butt wrote: > > On Sun, Mar 29, 2026 at 03:52:38AM +0800, Kairui Song via B4 Relay wrote: > > > From: Kairui Song <kasong@tencent.com> > > > [...] > > > > > > After this commit, congestion control is now effective and no more > > > > What do you mean by congestion control here? > > This particular case demostrated here is VMSCAN_THROTTLE_CONGESTED so > I described it as "congestion control", may I'll just say throttling to > avoid confusion, it's not limited to that. Yes use throttling or VMSCAN_THROTTLE_CONGESTED directly. Congestion control gives a networking vibe. > > > > > > spin on LRU or premature OOM. > > > > > > Stress test on other workloads also looking good. > > > > > > Suggested-by: Chen Ridong <chenridong@huaweicloud.com> > > > Signed-off-by: Kairui Song <kasong@tencent.com> > > > > There is still differences for global and kswapd reclaim in the shrink_node() > > like kswapd throttling and congestion state management and throttling. Any plan > > to unify them? > > Of course. Let fix it step by step, Fine with me but better to be clear about the destination. > this series is pretty long already. > I originally plan to put this patch in a later series, but as Ridong > pointed out leaving these counter updated but unused looks really > ugly. And this fix is clean and easily to understand I think.
On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Currently MGLRU and non-MGLRU handle the reclaim statistic and
> writeback handling very differently, especially throttling.
> Basically MGLRU just ignored the throttling part.
>
> Let's just unify this part, use a helper to deduplicate the code
> so both setups will share the same behavior. Also remove the
> folio_clear_reclaim in isolate_folio which was actively invalidating
> the congestion control. PG_reclaim is now handled by shrink_folio_list,
> keeping it in isolate_folio is not helpful.
>
> Test using following reproducer using bash:
>
> echo "Setup a slow device using dm delay"
> dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
> LOOP=$(losetup --show -f /var/tmp/backing)
> mkfs.ext4 -q $LOOP
> echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
> dmsetup create slow_dev
> mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
>
> echo "Start writeback pressure"
> sync && echo 3 > /proc/sys/vm/drop_caches
> mkdir /sys/fs/cgroup/test_wb
> echo 128M > /sys/fs/cgroup/test_wb/memory.max
> (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
> dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
>
> echo "Clean up"
> echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
> dmsetup resume slow_dev
> umount -l /mnt/slow && sync
> dmsetup remove slow_dev
>
Hi Kairui,
I have tested this patch series on both stable v6.1.163 and 7.0.0-rc5
on an arm64 platform, regarding the writeback throttling issue you described.
Test Results:
1. Kernel 6.1.163: I was unable to reproduce the OOM issue with the
provided test script. This is expected as the MGLRU writeback handling
might behave differently or the issue is less prominent in this specific
stable branch.
2. Kernel 7.0.0-rc5: I successfully reproduced the OOM issue using your
bash script. The dd process gets OOM killed shortly after starting the
writeback pressure.
Verification:
After applying your patch, I re-ran the test on 7.0.0-rc5:
1. The congestion control/throttling is now working as expected.
2. The OOM issue is resolved, and the dd process completes successfully
without being killed.
Tested-by: Leno Hou <lenohou@gmail.com>
> Before this commit, `dd` will get OOM killed immediately if
> MGLRU is enabled. Classic LRU is fine.
>
> After this commit, congestion control is now effective and no more
> spin on LRU or premature OOM.
>
> Stress test on other workloads also looking good.
>
> Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
> 1 file changed, 41 insertions(+), 52 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 1783da54ada1..83c8fdf8fdc4 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
> return !(current->flags & PF_LOCAL_THROTTLE);
> }
>
> +static void handle_reclaim_writeback(unsigned long nr_taken,
> + struct pglist_data *pgdat,
> + struct scan_control *sc,
> + struct reclaim_stat *stat)
> +{
> + /*
> + * If dirty folios are scanned that are not queued for IO, it
> + * implies that flushers are not doing their job. This can
> + * happen when memory pressure pushes dirty folios to the end of
> + * the LRU before the dirty limits are breached and the dirty
> + * data has expired. It can also happen when the proportion of
> + * dirty folios grows not through writes but through memory
> + * pressure reclaiming all the clean cache. And in some cases,
> + * the flushers simply cannot keep up with the allocation
> + * rate. Nudge the flusher threads in case they are asleep.
> + */
> + if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
> + wakeup_flusher_threads(WB_REASON_VMSCAN);
> + /*
> + * For cgroupv1 dirty throttling is achieved by waking up
> + * the kernel flusher here and later waiting on folios
> + * which are in writeback to finish (see shrink_folio_list()).
> + *
> + * Flusher may not be able to issue writeback quickly
> + * enough for cgroupv1 writeback throttling to work
> + * on a large system.
> + */
> + if (!writeback_throttling_sane(sc))
> + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> + }
> +
> + sc->nr.dirty += stat->nr_dirty;
> + sc->nr.congested += stat->nr_congested;
> + sc->nr.writeback += stat->nr_writeback;
> + sc->nr.immediate += stat->nr_immediate;
> + sc->nr.taken += nr_taken;
> +}
> +
> /*
> * shrink_inactive_list() is a helper for shrink_node(). It returns the number
> * of reclaimed pages
> @@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> lruvec_lock_irq(lruvec);
> lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
> nr_scanned - nr_reclaimed);
> -
> - /*
> - * If dirty folios are scanned that are not queued for IO, it
> - * implies that flushers are not doing their job. This can
> - * happen when memory pressure pushes dirty folios to the end of
> - * the LRU before the dirty limits are breached and the dirty
> - * data has expired. It can also happen when the proportion of
> - * dirty folios grows not through writes but through memory
> - * pressure reclaiming all the clean cache. And in some cases,
> - * the flushers simply cannot keep up with the allocation
> - * rate. Nudge the flusher threads in case they are asleep.
> - */
> - if (stat.nr_unqueued_dirty == nr_taken) {
> - wakeup_flusher_threads(WB_REASON_VMSCAN);
> - /*
> - * For cgroupv1 dirty throttling is achieved by waking up
> - * the kernel flusher here and later waiting on folios
> - * which are in writeback to finish (see shrink_folio_list()).
> - *
> - * Flusher may not be able to issue writeback quickly
> - * enough for cgroupv1 writeback throttling to work
> - * on a large system.
> - */
> - if (!writeback_throttling_sane(sc))
> - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> - }
> -
> - sc->nr.dirty += stat.nr_dirty;
> - sc->nr.congested += stat.nr_congested;
> - sc->nr.writeback += stat.nr_writeback;
> - sc->nr.immediate += stat.nr_immediate;
> - sc->nr.taken += nr_taken;
> -
> + handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
> trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> nr_scanned, nr_reclaimed, &stat, sc->priority, file);
> return nr_reclaimed;
> @@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
> if (!folio_test_referenced(folio))
> set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
>
> - /* for shrink_folio_list() */
> - folio_clear_reclaim(folio);
> -
> success = lru_gen_del_folio(lruvec, folio, true);
> VM_WARN_ON_ONCE_FOLIO(!success, folio);
>
> @@ -4833,26 +4836,11 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
> retry:
> reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false, memcg);
> sc->nr_reclaimed += reclaimed;
> + handle_reclaim_writeback(isolated, pgdat, sc, &stat);
> trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> type_scanned, reclaimed, &stat, sc->priority,
> type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
>
> - /*
> - * If too many file cache in the coldest generation can't be evicted
> - * due to being dirty, wake up the flusher.
> - */
> - if (stat.nr_unqueued_dirty == isolated) {
> - wakeup_flusher_threads(WB_REASON_VMSCAN);
> -
> - /*
> - * For cgroupv1 dirty throttling is achieved by waking up
> - * the kernel flusher here and later waiting on folios
> - * which are in writeback to finish (see shrink_folio_list()).
> - */
> - if (!writeback_throttling_sane(sc))
> - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> - }
> -
> list_for_each_entry_safe_reverse(folio, next, &list, lru) {
> DEFINE_MIN_SEQ(lruvec);
>
> @@ -4895,6 +4883,7 @@ static int evict_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
>
> if (!list_empty(&list)) {
> skip_retry = true;
> + isolated = 0;
> goto retry;
> }
>
>
--
Best regards,
Leno Hou
On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> From: Kairui Song <kasong@tencent.com>
>
> Currently MGLRU and non-MGLRU handle the reclaim statistic and
> writeback handling very differently, especially throttling.
> Basically MGLRU just ignored the throttling part.
>
> Let's just unify this part, use a helper to deduplicate the code
> so both setups will share the same behavior. Also remove the
> folio_clear_reclaim in isolate_folio which was actively invalidating
> the congestion control. PG_reclaim is now handled by shrink_folio_list,
> keeping it in isolate_folio is not helpful.
>
> Test using following reproducer using bash:
>
> echo "Setup a slow device using dm delay"
> dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
> LOOP=$(losetup --show -f /var/tmp/backing)
> mkfs.ext4 -q $LOOP
> echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
> dmsetup create slow_dev
> mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
>
> echo "Start writeback pressure"
> sync && echo 3 > /proc/sys/vm/drop_caches
> mkdir /sys/fs/cgroup/test_wb
> echo 128M > /sys/fs/cgroup/test_wb/memory.max
> (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
> dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
>
> echo "Clean up"
> echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
> dmsetup resume slow_dev
> umount -l /mnt/slow && sync
> dmsetup remove slow_dev
>
> Before this commit, `dd` will get OOM killed immediately if
> MGLRU is enabled. Classic LRU is fine.
>
> After this commit, congestion control is now effective and no more
> spin on LRU or premature OOM.
>
> Stress test on other workloads also looking good.
>
> Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
> mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
> 1 file changed, 41 insertions(+), 52 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 1783da54ada1..83c8fdf8fdc4 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
> return !(current->flags & PF_LOCAL_THROTTLE);
> }
>
> +static void handle_reclaim_writeback(unsigned long nr_taken,
> + struct pglist_data *pgdat,
> + struct scan_control *sc,
> + struct reclaim_stat *stat)
> +{
> + /*
> + * If dirty folios are scanned that are not queued for IO, it
> + * implies that flushers are not doing their job. This can
> + * happen when memory pressure pushes dirty folios to the end of
> + * the LRU before the dirty limits are breached and the dirty
> + * data has expired. It can also happen when the proportion of
> + * dirty folios grows not through writes but through memory
> + * pressure reclaiming all the clean cache. And in some cases,
> + * the flushers simply cannot keep up with the allocation
> + * rate. Nudge the flusher threads in case they are asleep.
> + */
> + if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
> + wakeup_flusher_threads(WB_REASON_VMSCAN);
> + /*
> + * For cgroupv1 dirty throttling is achieved by waking up
> + * the kernel flusher here and later waiting on folios
> + * which are in writeback to finish (see shrink_folio_list()).
> + *
> + * Flusher may not be able to issue writeback quickly
> + * enough for cgroupv1 writeback throttling to work
> + * on a large system.
> + */
> + if (!writeback_throttling_sane(sc))
> + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> + }
> +
> + sc->nr.dirty += stat->nr_dirty;
> + sc->nr.congested += stat->nr_congested;
> + sc->nr.writeback += stat->nr_writeback;
> + sc->nr.immediate += stat->nr_immediate;
> + sc->nr.taken += nr_taken;
> +}
> +
> /*
> * shrink_inactive_list() is a helper for shrink_node(). It returns the number
> * of reclaimed pages
> @@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> lruvec_lock_irq(lruvec);
> lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
> nr_scanned - nr_reclaimed);
> -
> - /*
> - * If dirty folios are scanned that are not queued for IO, it
> - * implies that flushers are not doing their job. This can
> - * happen when memory pressure pushes dirty folios to the end of
> - * the LRU before the dirty limits are breached and the dirty
> - * data has expired. It can also happen when the proportion of
> - * dirty folios grows not through writes but through memory
> - * pressure reclaiming all the clean cache. And in some cases,
> - * the flushers simply cannot keep up with the allocation
> - * rate. Nudge the flusher threads in case they are asleep.
> - */
> - if (stat.nr_unqueued_dirty == nr_taken) {
> - wakeup_flusher_threads(WB_REASON_VMSCAN);
> - /*
> - * For cgroupv1 dirty throttling is achieved by waking up
> - * the kernel flusher here and later waiting on folios
> - * which are in writeback to finish (see shrink_folio_list()).
> - *
> - * Flusher may not be able to issue writeback quickly
> - * enough for cgroupv1 writeback throttling to work
> - * on a large system.
> - */
> - if (!writeback_throttling_sane(sc))
> - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> - }
> -
> - sc->nr.dirty += stat.nr_dirty;
> - sc->nr.congested += stat.nr_congested;
> - sc->nr.writeback += stat.nr_writeback;
> - sc->nr.immediate += stat.nr_immediate;
> - sc->nr.taken += nr_taken;
> -
> + handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
> trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> nr_scanned, nr_reclaimed, &stat, sc->priority, file);
> return nr_reclaimed;
> @@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
> if (!folio_test_referenced(folio))
> set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
>
> - /* for shrink_folio_list() */
> - folio_clear_reclaim(folio);
IMO, Moving this change into patch 8 would make more sense. Otherwise LGTM.
On Tue, Mar 31, 2026 at 05:24:39PM +0800, Baolin Wang wrote:
>
>
> On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> > From: Kairui Song <kasong@tencent.com>
> >
> > Currently MGLRU and non-MGLRU handle the reclaim statistic and
> > writeback handling very differently, especially throttling.
> > Basically MGLRU just ignored the throttling part.
> >
> > Let's just unify this part, use a helper to deduplicate the code
> > so both setups will share the same behavior. Also remove the
> > folio_clear_reclaim in isolate_folio which was actively invalidating
> > the congestion control. PG_reclaim is now handled by shrink_folio_list,
> > keeping it in isolate_folio is not helpful.
> >
> > Test using following reproducer using bash:
> >
> > echo "Setup a slow device using dm delay"
> > dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
> > LOOP=$(losetup --show -f /var/tmp/backing)
> > mkfs.ext4 -q $LOOP
> > echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
> > dmsetup create slow_dev
> > mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
> >
> > echo "Start writeback pressure"
> > sync && echo 3 > /proc/sys/vm/drop_caches
> > mkdir /sys/fs/cgroup/test_wb
> > echo 128M > /sys/fs/cgroup/test_wb/memory.max
> > (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
> > dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
> >
> > echo "Clean up"
> > echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
> > dmsetup resume slow_dev
> > umount -l /mnt/slow && sync
> > dmsetup remove slow_dev
> >
> > Before this commit, `dd` will get OOM killed immediately if
> > MGLRU is enabled. Classic LRU is fine.
> >
> > After this commit, congestion control is now effective and no more
> > spin on LRU or premature OOM.
> >
> > Stress test on other workloads also looking good.
> >
> > Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
> > Signed-off-by: Kairui Song <kasong@tencent.com>
> > ---
> > mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
> > 1 file changed, 41 insertions(+), 52 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index 1783da54ada1..83c8fdf8fdc4 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
> > return !(current->flags & PF_LOCAL_THROTTLE);
> > }
> > +static void handle_reclaim_writeback(unsigned long nr_taken,
> > + struct pglist_data *pgdat,
> > + struct scan_control *sc,
> > + struct reclaim_stat *stat)
> > +{
> > + /*
> > + * If dirty folios are scanned that are not queued for IO, it
> > + * implies that flushers are not doing their job. This can
> > + * happen when memory pressure pushes dirty folios to the end of
> > + * the LRU before the dirty limits are breached and the dirty
> > + * data has expired. It can also happen when the proportion of
> > + * dirty folios grows not through writes but through memory
> > + * pressure reclaiming all the clean cache. And in some cases,
> > + * the flushers simply cannot keep up with the allocation
> > + * rate. Nudge the flusher threads in case they are asleep.
> > + */
> > + if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
> > + wakeup_flusher_threads(WB_REASON_VMSCAN);
> > + /*
> > + * For cgroupv1 dirty throttling is achieved by waking up
> > + * the kernel flusher here and later waiting on folios
> > + * which are in writeback to finish (see shrink_folio_list()).
> > + *
> > + * Flusher may not be able to issue writeback quickly
> > + * enough for cgroupv1 writeback throttling to work
> > + * on a large system.
> > + */
> > + if (!writeback_throttling_sane(sc))
> > + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> > + }
> > +
> > + sc->nr.dirty += stat->nr_dirty;
> > + sc->nr.congested += stat->nr_congested;
> > + sc->nr.writeback += stat->nr_writeback;
> > + sc->nr.immediate += stat->nr_immediate;
> > + sc->nr.taken += nr_taken;
> > +}
> > +
> > /*
> > * shrink_inactive_list() is a helper for shrink_node(). It returns the number
> > * of reclaimed pages
> > @@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> > lruvec_lock_irq(lruvec);
> > lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
> > nr_scanned - nr_reclaimed);
> > -
> > - /*
> > - * If dirty folios are scanned that are not queued for IO, it
> > - * implies that flushers are not doing their job. This can
> > - * happen when memory pressure pushes dirty folios to the end of
> > - * the LRU before the dirty limits are breached and the dirty
> > - * data has expired. It can also happen when the proportion of
> > - * dirty folios grows not through writes but through memory
> > - * pressure reclaiming all the clean cache. And in some cases,
> > - * the flushers simply cannot keep up with the allocation
> > - * rate. Nudge the flusher threads in case they are asleep.
> > - */
> > - if (stat.nr_unqueued_dirty == nr_taken) {
> > - wakeup_flusher_threads(WB_REASON_VMSCAN);
> > - /*
> > - * For cgroupv1 dirty throttling is achieved by waking up
> > - * the kernel flusher here and later waiting on folios
> > - * which are in writeback to finish (see shrink_folio_list()).
> > - *
> > - * Flusher may not be able to issue writeback quickly
> > - * enough for cgroupv1 writeback throttling to work
> > - * on a large system.
> > - */
> > - if (!writeback_throttling_sane(sc))
> > - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> > - }
> > -
> > - sc->nr.dirty += stat.nr_dirty;
> > - sc->nr.congested += stat.nr_congested;
> > - sc->nr.writeback += stat.nr_writeback;
> > - sc->nr.immediate += stat.nr_immediate;
> > - sc->nr.taken += nr_taken;
> > -
> > + handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
> > trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> > nr_scanned, nr_reclaimed, &stat, sc->priority, file);
> > return nr_reclaimed;
> > @@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
> > if (!folio_test_referenced(folio))
> > set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
> > - /* for shrink_folio_list() */
> > - folio_clear_reclaim(folio);
>
> IMO, Moving this change into patch 8 would make more sense. Otherwise LGTM.
Thanks for the review! I made it a separate patch so we can better
identify which part had the performance gain, and patch 8 can keep
its Reviewed-by tags. Patch 8 is still good without this: a few
counters are updated with no user, which is somewhat wasteful but harmless.
On 3/31/26 5:29 PM, Kairui Song wrote:
> On Tue, Mar 31, 2026 at 05:24:39PM +0800, Baolin Wang wrote:
>>
>>
>> On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
>>> From: Kairui Song <kasong@tencent.com>
>>>
>>> Currently MGLRU and non-MGLRU handle the reclaim statistic and
>>> writeback handling very differently, especially throttling.
>>> Basically MGLRU just ignored the throttling part.
>>>
>>> Let's just unify this part, use a helper to deduplicate the code
>>> so both setups will share the same behavior. Also remove the
>>> folio_clear_reclaim in isolate_folio which was actively invalidating
>>> the congestion control. PG_reclaim is now handled by shrink_folio_list,
>>> keeping it in isolate_folio is not helpful.
>>>
>>> Test using following reproducer using bash:
>>>
>>> echo "Setup a slow device using dm delay"
>>> dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
>>> LOOP=$(losetup --show -f /var/tmp/backing)
>>> mkfs.ext4 -q $LOOP
>>> echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
>>> dmsetup create slow_dev
>>> mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
>>>
>>> echo "Start writeback pressure"
>>> sync && echo 3 > /proc/sys/vm/drop_caches
>>> mkdir /sys/fs/cgroup/test_wb
>>> echo 128M > /sys/fs/cgroup/test_wb/memory.max
>>> (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
>>> dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
>>>
>>> echo "Clean up"
>>> echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
>>> dmsetup resume slow_dev
>>> umount -l /mnt/slow && sync
>>> dmsetup remove slow_dev
>>>
>>> Before this commit, `dd` will get OOM killed immediately if
>>> MGLRU is enabled. Classic LRU is fine.
>>>
>>> After this commit, congestion control is now effective and no more
>>> spin on LRU or premature OOM.
>>>
>>> Stress test on other workloads also looking good.
>>>
>>> Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
>>> Signed-off-by: Kairui Song <kasong@tencent.com>
>>> ---
>>> mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
>>> 1 file changed, 41 insertions(+), 52 deletions(-)
>>>
>>> diff --git a/mm/vmscan.c b/mm/vmscan.c
>>> index 1783da54ada1..83c8fdf8fdc4 100644
>>> --- a/mm/vmscan.c
>>> +++ b/mm/vmscan.c
>>> @@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
>>> return !(current->flags & PF_LOCAL_THROTTLE);
>>> }
>>> +static void handle_reclaim_writeback(unsigned long nr_taken,
>>> + struct pglist_data *pgdat,
>>> + struct scan_control *sc,
>>> + struct reclaim_stat *stat)
>>> +{
>>> + /*
>>> + * If dirty folios are scanned that are not queued for IO, it
>>> + * implies that flushers are not doing their job. This can
>>> + * happen when memory pressure pushes dirty folios to the end of
>>> + * the LRU before the dirty limits are breached and the dirty
>>> + * data has expired. It can also happen when the proportion of
>>> + * dirty folios grows not through writes but through memory
>>> + * pressure reclaiming all the clean cache. And in some cases,
>>> + * the flushers simply cannot keep up with the allocation
>>> + * rate. Nudge the flusher threads in case they are asleep.
>>> + */
>>> + if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
>>> + wakeup_flusher_threads(WB_REASON_VMSCAN);
>>> + /*
>>> + * For cgroupv1 dirty throttling is achieved by waking up
>>> + * the kernel flusher here and later waiting on folios
>>> + * which are in writeback to finish (see shrink_folio_list()).
>>> + *
>>> + * Flusher may not be able to issue writeback quickly
>>> + * enough for cgroupv1 writeback throttling to work
>>> + * on a large system.
>>> + */
>>> + if (!writeback_throttling_sane(sc))
>>> + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
>>> + }
>>> +
>>> + sc->nr.dirty += stat->nr_dirty;
>>> + sc->nr.congested += stat->nr_congested;
>>> + sc->nr.writeback += stat->nr_writeback;
>>> + sc->nr.immediate += stat->nr_immediate;
>>> + sc->nr.taken += nr_taken;
>>> +}
>>> +
>>> /*
>>> * shrink_inactive_list() is a helper for shrink_node(). It returns the number
>>> * of reclaimed pages
>>> @@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
>>> lruvec_lock_irq(lruvec);
>>> lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
>>> nr_scanned - nr_reclaimed);
>>> -
>>> - /*
>>> - * If dirty folios are scanned that are not queued for IO, it
>>> - * implies that flushers are not doing their job. This can
>>> - * happen when memory pressure pushes dirty folios to the end of
>>> - * the LRU before the dirty limits are breached and the dirty
>>> - * data has expired. It can also happen when the proportion of
>>> - * dirty folios grows not through writes but through memory
>>> - * pressure reclaiming all the clean cache. And in some cases,
>>> - * the flushers simply cannot keep up with the allocation
>>> - * rate. Nudge the flusher threads in case they are asleep.
>>> - */
>>> - if (stat.nr_unqueued_dirty == nr_taken) {
>>> - wakeup_flusher_threads(WB_REASON_VMSCAN);
>>> - /*
>>> - * For cgroupv1 dirty throttling is achieved by waking up
>>> - * the kernel flusher here and later waiting on folios
>>> - * which are in writeback to finish (see shrink_folio_list()).
>>> - *
>>> - * Flusher may not be able to issue writeback quickly
>>> - * enough for cgroupv1 writeback throttling to work
>>> - * on a large system.
>>> - */
>>> - if (!writeback_throttling_sane(sc))
>>> - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
>>> - }
>>> -
>>> - sc->nr.dirty += stat.nr_dirty;
>>> - sc->nr.congested += stat.nr_congested;
>>> - sc->nr.writeback += stat.nr_writeback;
>>> - sc->nr.immediate += stat.nr_immediate;
>>> - sc->nr.taken += nr_taken;
>>> -
>>> + handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
>>> trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
>>> nr_scanned, nr_reclaimed, &stat, sc->priority, file);
>>> return nr_reclaimed;
>>> @@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
>>> if (!folio_test_referenced(folio))
>>> set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
>>> - /* for shrink_folio_list() */
>>> - folio_clear_reclaim(folio);
>>
>> IMO, Moving this change into patch 8 would make more sense. Otherwise LGTM.
>
> Thanks for the review! I made it a separate patch so we can better
> identify which part had the performance gain, and patch 8 can keep
> the review by. Patch 8 is still good without this, a few counters
> are updated with no user, kind of wasted but that's harmless.
I’m not referring to all the above changes. What I mean is that the
'folio_clear_reclaim' removal should belong to patch 8. Since
shrink_folio_list() in patch 8 will handle the writeback logic,
folio_clear_reclaim() should also be removed in the same patch.
On Tue, Mar 31, 2026 at 05:36:26PM +0800, Baolin Wang wrote:
>
>
> On 3/31/26 5:29 PM, Kairui Song wrote:
> > On Tue, Mar 31, 2026 at 05:24:39PM +0800, Baolin Wang wrote:
> > >
> > >
> > > On 3/29/26 3:52 AM, Kairui Song via B4 Relay wrote:
> > > > From: Kairui Song <kasong@tencent.com>
> > > >
> > > > Currently MGLRU and non-MGLRU handle the reclaim statistic and
> > > > writeback handling very differently, especially throttling.
> > > > Basically MGLRU just ignored the throttling part.
> > > >
> > > > Let's just unify this part, use a helper to deduplicate the code
> > > > so both setups will share the same behavior. Also remove the
> > > > folio_clear_reclaim in isolate_folio which was actively invalidating
> > > > the congestion control. PG_reclaim is now handled by shrink_folio_list,
> > > > keeping it in isolate_folio is not helpful.
> > > >
> > > > Test using following reproducer using bash:
> > > >
> > > > echo "Setup a slow device using dm delay"
> > > > dd if=/dev/zero of=/var/tmp/backing bs=1M count=2048
> > > > LOOP=$(losetup --show -f /var/tmp/backing)
> > > > mkfs.ext4 -q $LOOP
> > > > echo "0 $(blockdev --getsz $LOOP) delay $LOOP 0 0 $LOOP 0 1000" | \
> > > > dmsetup create slow_dev
> > > > mkdir -p /mnt/slow && mount /dev/mapper/slow_dev /mnt/slow
> > > >
> > > > echo "Start writeback pressure"
> > > > sync && echo 3 > /proc/sys/vm/drop_caches
> > > > mkdir /sys/fs/cgroup/test_wb
> > > > echo 128M > /sys/fs/cgroup/test_wb/memory.max
> > > > (echo $BASHPID > /sys/fs/cgroup/test_wb/cgroup.procs && \
> > > > dd if=/dev/zero of=/mnt/slow/testfile bs=1M count=192)
> > > >
> > > > echo "Clean up"
> > > > echo "0 $(blockdev --getsz $LOOP) error" | dmsetup load slow_dev
> > > > dmsetup resume slow_dev
> > > > umount -l /mnt/slow && sync
> > > > dmsetup remove slow_dev
> > > >
> > > > Before this commit, `dd` will get OOM killed immediately if
> > > > MGLRU is enabled. Classic LRU is fine.
> > > >
> > > > After this commit, congestion control is now effective and no more
> > > > spin on LRU or premature OOM.
> > > >
> > > > Stress test on other workloads also looking good.
> > > >
> > > > Suggested-by: Chen Ridong <chenridong@huaweicloud.com>
> > > > Signed-off-by: Kairui Song <kasong@tencent.com>
> > > > ---
> > > > mm/vmscan.c | 93 +++++++++++++++++++++++++++----------------------------------
> > > > 1 file changed, 41 insertions(+), 52 deletions(-)
> > > >
> > > > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > > > index 1783da54ada1..83c8fdf8fdc4 100644
> > > > --- a/mm/vmscan.c
> > > > +++ b/mm/vmscan.c
> > > > @@ -1942,6 +1942,44 @@ static int current_may_throttle(void)
> > > > return !(current->flags & PF_LOCAL_THROTTLE);
> > > > }
> > > > +static void handle_reclaim_writeback(unsigned long nr_taken,
> > > > + struct pglist_data *pgdat,
> > > > + struct scan_control *sc,
> > > > + struct reclaim_stat *stat)
> > > > +{
> > > > + /*
> > > > + * If dirty folios are scanned that are not queued for IO, it
> > > > + * implies that flushers are not doing their job. This can
> > > > + * happen when memory pressure pushes dirty folios to the end of
> > > > + * the LRU before the dirty limits are breached and the dirty
> > > > + * data has expired. It can also happen when the proportion of
> > > > + * dirty folios grows not through writes but through memory
> > > > + * pressure reclaiming all the clean cache. And in some cases,
> > > > + * the flushers simply cannot keep up with the allocation
> > > > + * rate. Nudge the flusher threads in case they are asleep.
> > > > + */
> > > > + if (stat->nr_unqueued_dirty == nr_taken && nr_taken) {
> > > > + wakeup_flusher_threads(WB_REASON_VMSCAN);
> > > > + /*
> > > > + * For cgroupv1 dirty throttling is achieved by waking up
> > > > + * the kernel flusher here and later waiting on folios
> > > > + * which are in writeback to finish (see shrink_folio_list()).
> > > > + *
> > > > + * Flusher may not be able to issue writeback quickly
> > > > + * enough for cgroupv1 writeback throttling to work
> > > > + * on a large system.
> > > > + */
> > > > + if (!writeback_throttling_sane(sc))
> > > > + reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> > > > + }
> > > > +
> > > > + sc->nr.dirty += stat->nr_dirty;
> > > > + sc->nr.congested += stat->nr_congested;
> > > > + sc->nr.writeback += stat->nr_writeback;
> > > > + sc->nr.immediate += stat->nr_immediate;
> > > > + sc->nr.taken += nr_taken;
> > > > +}
> > > > +
> > > > /*
> > > > * shrink_inactive_list() is a helper for shrink_node(). It returns the number
> > > > * of reclaimed pages
> > > > @@ -2005,39 +2043,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
> > > > lruvec_lock_irq(lruvec);
> > > > lru_note_cost_unlock_irq(lruvec, file, stat.nr_pageout,
> > > > nr_scanned - nr_reclaimed);
> > > > -
> > > > - /*
> > > > - * If dirty folios are scanned that are not queued for IO, it
> > > > - * implies that flushers are not doing their job. This can
> > > > - * happen when memory pressure pushes dirty folios to the end of
> > > > - * the LRU before the dirty limits are breached and the dirty
> > > > - * data has expired. It can also happen when the proportion of
> > > > - * dirty folios grows not through writes but through memory
> > > > - * pressure reclaiming all the clean cache. And in some cases,
> > > > - * the flushers simply cannot keep up with the allocation
> > > > - * rate. Nudge the flusher threads in case they are asleep.
> > > > - */
> > > > - if (stat.nr_unqueued_dirty == nr_taken) {
> > > > - wakeup_flusher_threads(WB_REASON_VMSCAN);
> > > > - /*
> > > > - * For cgroupv1 dirty throttling is achieved by waking up
> > > > - * the kernel flusher here and later waiting on folios
> > > > - * which are in writeback to finish (see shrink_folio_list()).
> > > > - *
> > > > - * Flusher may not be able to issue writeback quickly
> > > > - * enough for cgroupv1 writeback throttling to work
> > > > - * on a large system.
> > > > - */
> > > > - if (!writeback_throttling_sane(sc))
> > > > - reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
> > > > - }
> > > > -
> > > > - sc->nr.dirty += stat.nr_dirty;
> > > > - sc->nr.congested += stat.nr_congested;
> > > > - sc->nr.writeback += stat.nr_writeback;
> > > > - sc->nr.immediate += stat.nr_immediate;
> > > > - sc->nr.taken += nr_taken;
> > > > -
> > > > + handle_reclaim_writeback(nr_taken, pgdat, sc, &stat);
> > > > trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
> > > > nr_scanned, nr_reclaimed, &stat, sc->priority, file);
> > > > return nr_reclaimed;
> > > > @@ -4651,9 +4657,6 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca
> > > > if (!folio_test_referenced(folio))
> > > > set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0);
> > > > - /* for shrink_folio_list() */
> > > > - folio_clear_reclaim(folio);
> > >
> > > IMO, Moving this change into patch 8 would make more sense. Otherwise LGTM.
> >
> > Thanks for the review! I made it a separate patch so we can better
> > identify which part had the performance gain, and patch 8 can keep
> > the review by. Patch 8 is still good without this, a few counters
> > are updated with no user, kind of wasted but that's harmless.
>
> I’m not referring to all the above changes. What I mean is that the
> 'folio_clear_reclaim' removal should belong to patch 8. Since
> shrink_folio_list() in patch 8 will handle the writeback logic,
> folio_clear_reclaim() should also be removed in the same patch.
Ah, that's a very good point then. I can move it to V3, thanks!
© 2016 - 2026 Red Hat, Inc.