Factor the loop body of xfsaild_push() into a separate
xfsaild_process_logitem() helper to improve readability.
This is a pure code movement with no functional change. The
subsequent patch to fix a use-after-free in the AIL push path
depends on this refactoring.
Cc: <stable@vger.kernel.org> # v5.9
Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
---
fs/xfs/xfs_trans_ail.c | 116 +++++++++++++++++++++++------------------
1 file changed, 64 insertions(+), 52 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 923729af4206..ac747804e1d6 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -458,6 +458,69 @@ xfs_ail_calc_push_target(
return target_lsn;
}
+static void
+xfsaild_process_logitem(
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn,
+ int *stuck,
+ int *flushing)
+{
+ struct xfs_mount *mp = ailp->ail_log->l_mp;
+ int lock_result;
+
+ /*
+ * Note that iop_push may unlock and reacquire the AIL lock. We
+ * rely on the AIL cursor implementation to be able to deal with
+ * the dropped lock.
+ */
+ lock_result = xfsaild_push_item(ailp, lip);
+ switch (lock_result) {
+ case XFS_ITEM_SUCCESS:
+ XFS_STATS_INC(mp, xs_push_ail_success);
+ trace_xfs_ail_push(lip);
+
+ ailp->ail_last_pushed_lsn = lsn;
+ break;
+
+ case XFS_ITEM_FLUSHING:
+ /*
+ * The item or its backing buffer is already being
+ * flushed. The typical reason for that is that an
+ * inode buffer is locked because we already pushed the
+ * updates to it as part of inode clustering.
+ *
+ * We do not want to stop flushing just because lots
+ * of items are already being flushed, but we need to
+ * re-try the flushing relatively soon if most of the
+ * AIL is being flushed.
+ */
+ XFS_STATS_INC(mp, xs_push_ail_flushing);
+ trace_xfs_ail_flushing(lip);
+
+ (*flushing)++;
+ ailp->ail_last_pushed_lsn = lsn;
+ break;
+
+ case XFS_ITEM_PINNED:
+ XFS_STATS_INC(mp, xs_push_ail_pinned);
+ trace_xfs_ail_pinned(lip);
+
+ (*stuck)++;
+ ailp->ail_log_flush++;
+ break;
+ case XFS_ITEM_LOCKED:
+ XFS_STATS_INC(mp, xs_push_ail_locked);
+ trace_xfs_ail_locked(lip);
+
+ (*stuck)++;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
static long
xfsaild_push(
struct xfs_ail *ailp)
@@ -505,62 +568,11 @@ xfsaild_push(
lsn = lip->li_lsn;
while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
- int lock_result;
if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
goto next_item;
- /*
- * Note that iop_push may unlock and reacquire the AIL lock. We
- * rely on the AIL cursor implementation to be able to deal with
- * the dropped lock.
- */
- lock_result = xfsaild_push_item(ailp, lip);
- switch (lock_result) {
- case XFS_ITEM_SUCCESS:
- XFS_STATS_INC(mp, xs_push_ail_success);
- trace_xfs_ail_push(lip);
-
- ailp->ail_last_pushed_lsn = lsn;
- break;
-
- case XFS_ITEM_FLUSHING:
- /*
- * The item or its backing buffer is already being
- * flushed. The typical reason for that is that an
- * inode buffer is locked because we already pushed the
- * updates to it as part of inode clustering.
- *
- * We do not want to stop flushing just because lots
- * of items are already being flushed, but we need to
- * re-try the flushing relatively soon if most of the
- * AIL is being flushed.
- */
- XFS_STATS_INC(mp, xs_push_ail_flushing);
- trace_xfs_ail_flushing(lip);
-
- flushing++;
- ailp->ail_last_pushed_lsn = lsn;
- break;
-
- case XFS_ITEM_PINNED:
- XFS_STATS_INC(mp, xs_push_ail_pinned);
- trace_xfs_ail_pinned(lip);
-
- stuck++;
- ailp->ail_log_flush++;
- break;
- case XFS_ITEM_LOCKED:
- XFS_STATS_INC(mp, xs_push_ail_locked);
- trace_xfs_ail_locked(lip);
-
- stuck++;
- break;
- default:
- ASSERT(0);
- break;
- }
-
+ xfsaild_process_logitem(ailp, lip, lsn, &stuck, &flushing);
count++;
/*
--
2.50.1
Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284
Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
On Sun, Mar 08, 2026 at 06:28:07PM +0000, Yuto Ohnuki wrote:
> Factor the loop body of xfsaild_push() into a separate
> xfsaild_process_logitem() helper to improve readability.
>
> This is a pure code movement with no functional change. The
> subsequent patch to fix a use-after-free in the AIL push path
> depends on this refactoring.
>
> Cc: <stable@vger.kernel.org> # v5.9
> Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>

Reviewed-by: Dave Chinner <dchinner@redhat.com>

--
Dave Chinner
dgc@kernel.org
> > Factor the loop body of xfsaild_push() into a separate
> > xfsaild_process_logitem() helper to improve readability.
> >
> > This is a pure code movement with no functional change. The
> > subsequent patch to fix a use-after-free in the AIL push path
> > depends on this refactoring.
> >
> > Cc: <stable@vger.kernel.org> # v5.9
> > Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
>
> Reviewed-by: Dave Chinner <dchinner@redhat.com>

Thanks for the review, Dave.

In v4, I reworked the patch ordering so that the bugfix patches don't
depend on the refactoring patch, reducing the stable backport burden.
Since the context has changed slightly, I've dropped your Reviewed-by
from this patch in v4 just to be safe. I would appreciate it if you
could take another look when you get a chance.

Yuto

Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284

Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
On Sun, Mar 08, 2026 at 06:28:07PM +0000, Yuto Ohnuki wrote:
> Factor the loop body of xfsaild_push() into a separate
> xfsaild_process_logitem() helper to improve readability.
>
> This is a pure code movement with no functional change. The
> subsequent patch to fix a use-after-free in the AIL push path
> depends on this refactoring.
>
> Cc: <stable@vger.kernel.org> # v5.9
> Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
Seems like a reasonable hoist to reduce the length of the function, but
in ordering the patches this way (cleanup, then bugfixes) the hoist also
has to be backported to 5.10/5.15/6.1/6.6/6.12/6.18/6.19.
--D
> ---
> fs/xfs/xfs_trans_ail.c | 116 +++++++++++++++++++++++------------------
> 1 file changed, 64 insertions(+), 52 deletions(-)
>
> diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
> index 923729af4206..ac747804e1d6 100644
> --- a/fs/xfs/xfs_trans_ail.c
> +++ b/fs/xfs/xfs_trans_ail.c
> @@ -458,6 +458,69 @@ xfs_ail_calc_push_target(
> return target_lsn;
> }
>
> +static void
> +xfsaild_process_logitem(
> + struct xfs_ail *ailp,
> + struct xfs_log_item *lip,
> + xfs_lsn_t lsn,
> + int *stuck,
> + int *flushing)
> +{
> + struct xfs_mount *mp = ailp->ail_log->l_mp;
> + int lock_result;
> +
> + /*
> + * Note that iop_push may unlock and reacquire the AIL lock. We
> + * rely on the AIL cursor implementation to be able to deal with
> + * the dropped lock.
> + */
> + lock_result = xfsaild_push_item(ailp, lip);
> + switch (lock_result) {
> + case XFS_ITEM_SUCCESS:
> + XFS_STATS_INC(mp, xs_push_ail_success);
> + trace_xfs_ail_push(lip);
> +
> + ailp->ail_last_pushed_lsn = lsn;
> + break;
> +
> + case XFS_ITEM_FLUSHING:
> + /*
> + * The item or its backing buffer is already being
> + * flushed. The typical reason for that is that an
> + * inode buffer is locked because we already pushed the
> + * updates to it as part of inode clustering.
> + *
> + * We do not want to stop flushing just because lots
> + * of items are already being flushed, but we need to
> + * re-try the flushing relatively soon if most of the
> + * AIL is being flushed.
> + */
> + XFS_STATS_INC(mp, xs_push_ail_flushing);
> + trace_xfs_ail_flushing(lip);
> +
> + (*flushing)++;
> + ailp->ail_last_pushed_lsn = lsn;
> + break;
> +
> + case XFS_ITEM_PINNED:
> + XFS_STATS_INC(mp, xs_push_ail_pinned);
> + trace_xfs_ail_pinned(lip);
> +
> + (*stuck)++;
> + ailp->ail_log_flush++;
> + break;
> + case XFS_ITEM_LOCKED:
> + XFS_STATS_INC(mp, xs_push_ail_locked);
> + trace_xfs_ail_locked(lip);
> +
> + (*stuck)++;
> + break;
> + default:
> + ASSERT(0);
> + break;
> + }
> +}
> +
> static long
> xfsaild_push(
> struct xfs_ail *ailp)
> @@ -505,62 +568,11 @@ xfsaild_push(
>
> lsn = lip->li_lsn;
> while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
> - int lock_result;
>
> if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
> goto next_item;
>
> - /*
> - * Note that iop_push may unlock and reacquire the AIL lock. We
> - * rely on the AIL cursor implementation to be able to deal with
> - * the dropped lock.
> - */
> - lock_result = xfsaild_push_item(ailp, lip);
> - switch (lock_result) {
> - case XFS_ITEM_SUCCESS:
> - XFS_STATS_INC(mp, xs_push_ail_success);
> - trace_xfs_ail_push(lip);
> -
> - ailp->ail_last_pushed_lsn = lsn;
> - break;
> -
> - case XFS_ITEM_FLUSHING:
> - /*
> - * The item or its backing buffer is already being
> - * flushed. The typical reason for that is that an
> - * inode buffer is locked because we already pushed the
> - * updates to it as part of inode clustering.
> - *
> - * We do not want to stop flushing just because lots
> - * of items are already being flushed, but we need to
> - * re-try the flushing relatively soon if most of the
> - * AIL is being flushed.
> - */
> - XFS_STATS_INC(mp, xs_push_ail_flushing);
> - trace_xfs_ail_flushing(lip);
> -
> - flushing++;
> - ailp->ail_last_pushed_lsn = lsn;
> - break;
> -
> - case XFS_ITEM_PINNED:
> - XFS_STATS_INC(mp, xs_push_ail_pinned);
> - trace_xfs_ail_pinned(lip);
> -
> - stuck++;
> - ailp->ail_log_flush++;
> - break;
> - case XFS_ITEM_LOCKED:
> - XFS_STATS_INC(mp, xs_push_ail_locked);
> - trace_xfs_ail_locked(lip);
> -
> - stuck++;
> - break;
> - default:
> - ASSERT(0);
> - break;
> - }
> -
> + xfsaild_process_logitem(ailp, lip, lsn, &stuck, &flushing);
> count++;
>
> /*
> --
> 2.50.1
>
>
>
>
> Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284
>
> Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
>
>
>
>
> Seems like a reasonable hoist to reduce the length of the function, but
> in ordering the patches this way (cleanup, then bugfixes) the hoist also
> has to be backported to 5.10/5.15/6.1/6.6/6.12/6.18/6.19.
>
> --D

Thank you. In v4, I moved the refactoring to patch 4/4 after the bugfix
patches and dropped the Cc: stable tag, so it no longer needs to be
backported.

Yuto

Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284

Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
© 2016 - 2026 Red Hat, Inc.