Factor the loop body of xfsaild_push() into a separate
xfsaild_process_logitem() helper to improve readability.
This is a mechanical refactor with no functional change; the
stuck and flushing counters are now updated through pointers
passed to the helper.
Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
---
fs/xfs/xfs_trans_ail.c | 127 ++++++++++++++++++++++-------------------
1 file changed, 69 insertions(+), 58 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 63266d31b514..99a9bf3762b7 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -464,6 +464,74 @@ xfs_ail_calc_push_target(
return target_lsn;
}
+static void
+xfsaild_process_logitem(
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip,
+ int *stuck,
+ int *flushing)
+{
+ struct xfs_mount *mp = ailp->ail_log->l_mp;
+ uint type = lip->li_type;
+ unsigned long flags = lip->li_flags;
+ xfs_lsn_t item_lsn = lip->li_lsn;
+ int lock_result;
+
+ /*
+ * Note that iop_push may unlock and reacquire the AIL lock. We
+ * rely on the AIL cursor implementation to be able to deal with
+ * the dropped lock.
+ *
+ * The log item may have been freed by the push, so it must not
+ * be accessed or dereferenced below this line.
+ */
+ lock_result = xfsaild_push_item(ailp, lip);
+ switch (lock_result) {
+ case XFS_ITEM_SUCCESS:
+ XFS_STATS_INC(mp, xs_push_ail_success);
+ trace_xfs_ail_push(ailp, type, flags, item_lsn);
+
+ ailp->ail_last_pushed_lsn = item_lsn;
+ break;
+
+ case XFS_ITEM_FLUSHING:
+ /*
+ * The item or its backing buffer is already being
+ * flushed. The typical reason for that is that an
+ * inode buffer is locked because we already pushed the
+ * updates to it as part of inode clustering.
+ *
+ * We do not want to stop flushing just because lots
+ * of items are already being flushed, but we need to
+ * re-try the flushing relatively soon if most of the
+ * AIL is being flushed.
+ */
+ XFS_STATS_INC(mp, xs_push_ail_flushing);
+ trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
+
+ (*flushing)++;
+ ailp->ail_last_pushed_lsn = item_lsn;
+ break;
+
+ case XFS_ITEM_PINNED:
+ XFS_STATS_INC(mp, xs_push_ail_pinned);
+ trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
+
+ (*stuck)++;
+ ailp->ail_log_flush++;
+ break;
+ case XFS_ITEM_LOCKED:
+ XFS_STATS_INC(mp, xs_push_ail_locked);
+ trace_xfs_ail_locked(ailp, type, flags, item_lsn);
+
+ (*stuck)++;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
static long
xfsaild_push(
struct xfs_ail *ailp)
@@ -511,68 +579,11 @@ xfsaild_push(
lsn = lip->li_lsn;
while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
- int lock_result;
- uint type = lip->li_type;
- unsigned long flags = lip->li_flags;
- xfs_lsn_t item_lsn = lip->li_lsn;
if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
goto next_item;
- /*
- * Note that iop_push may unlock and reacquire the AIL lock. We
- * rely on the AIL cursor implementation to be able to deal with
- * the dropped lock.
- *
- * The log item may have been freed by the push, so it must not
- * be accessed or dereferenced below this line.
- */
- lock_result = xfsaild_push_item(ailp, lip);
- switch (lock_result) {
- case XFS_ITEM_SUCCESS:
- XFS_STATS_INC(mp, xs_push_ail_success);
- trace_xfs_ail_push(ailp, type, flags, item_lsn);
-
- ailp->ail_last_pushed_lsn = item_lsn;
- break;
-
- case XFS_ITEM_FLUSHING:
- /*
- * The item or its backing buffer is already being
- * flushed. The typical reason for that is that an
- * inode buffer is locked because we already pushed the
- * updates to it as part of inode clustering.
- *
- * We do not want to stop flushing just because lots
- * of items are already being flushed, but we need to
- * re-try the flushing relatively soon if most of the
- * AIL is being flushed.
- */
- XFS_STATS_INC(mp, xs_push_ail_flushing);
- trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
-
- flushing++;
- ailp->ail_last_pushed_lsn = item_lsn;
- break;
-
- case XFS_ITEM_PINNED:
- XFS_STATS_INC(mp, xs_push_ail_pinned);
- trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
-
- stuck++;
- ailp->ail_log_flush++;
- break;
- case XFS_ITEM_LOCKED:
- XFS_STATS_INC(mp, xs_push_ail_locked);
- trace_xfs_ail_locked(ailp, type, flags, item_lsn);
-
- stuck++;
- break;
- default:
- ASSERT(0);
- break;
- }
-
+ xfsaild_process_logitem(ailp, lip, &stuck, &flushing);
count++;
/*
--
2.50.1
Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284
Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
On Tue, Mar 10, 2026 at 06:38:40PM +0000, Yuto Ohnuki wrote:
> Factor the loop body of xfsaild_push() into a separate
> xfsaild_process_logitem() helper to improve readability.
>
> This is a pure code movement with no functional change.
>
> Signed-off-by: Yuto Ohnuki <ytohnuki@amazon.com>
Neat hoist!
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
--D
> ---
> fs/xfs/xfs_trans_ail.c | 127 ++++++++++++++++++++++-------------------
> 1 file changed, 69 insertions(+), 58 deletions(-)
>
> diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
> index 63266d31b514..99a9bf3762b7 100644
> --- a/fs/xfs/xfs_trans_ail.c
> +++ b/fs/xfs/xfs_trans_ail.c
> @@ -464,6 +464,74 @@ xfs_ail_calc_push_target(
> return target_lsn;
> }
>
> +static void
> +xfsaild_process_logitem(
> + struct xfs_ail *ailp,
> + struct xfs_log_item *lip,
> + int *stuck,
> + int *flushing)
> +{
> + struct xfs_mount *mp = ailp->ail_log->l_mp;
> + uint type = lip->li_type;
> + unsigned long flags = lip->li_flags;
> + xfs_lsn_t item_lsn = lip->li_lsn;
> + int lock_result;
> +
> + /*
> + * Note that iop_push may unlock and reacquire the AIL lock. We
> + * rely on the AIL cursor implementation to be able to deal with
> + * the dropped lock.
> + *
> + * The log item may have been freed by the push, so it must not
> + * be accessed or dereferenced below this line.
> + */
> + lock_result = xfsaild_push_item(ailp, lip);
> + switch (lock_result) {
> + case XFS_ITEM_SUCCESS:
> + XFS_STATS_INC(mp, xs_push_ail_success);
> + trace_xfs_ail_push(ailp, type, flags, item_lsn);
> +
> + ailp->ail_last_pushed_lsn = item_lsn;
> + break;
> +
> + case XFS_ITEM_FLUSHING:
> + /*
> + * The item or its backing buffer is already being
> + * flushed. The typical reason for that is that an
> + * inode buffer is locked because we already pushed the
> + * updates to it as part of inode clustering.
> + *
> + * We do not want to stop flushing just because lots
> + * of items are already being flushed, but we need to
> + * re-try the flushing relatively soon if most of the
> + * AIL is being flushed.
> + */
> + XFS_STATS_INC(mp, xs_push_ail_flushing);
> + trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
> +
> + (*flushing)++;
> + ailp->ail_last_pushed_lsn = item_lsn;
> + break;
> +
> + case XFS_ITEM_PINNED:
> + XFS_STATS_INC(mp, xs_push_ail_pinned);
> + trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
> +
> + (*stuck)++;
> + ailp->ail_log_flush++;
> + break;
> + case XFS_ITEM_LOCKED:
> + XFS_STATS_INC(mp, xs_push_ail_locked);
> + trace_xfs_ail_locked(ailp, type, flags, item_lsn);
> +
> + (*stuck)++;
> + break;
> + default:
> + ASSERT(0);
> + break;
> + }
> +}
> +
> static long
> xfsaild_push(
> struct xfs_ail *ailp)
> @@ -511,68 +579,11 @@ xfsaild_push(
>
> lsn = lip->li_lsn;
> while ((XFS_LSN_CMP(lip->li_lsn, ailp->ail_target) <= 0)) {
> - int lock_result;
> - uint type = lip->li_type;
> - unsigned long flags = lip->li_flags;
> - xfs_lsn_t item_lsn = lip->li_lsn;
>
> if (test_bit(XFS_LI_FLUSHING, &lip->li_flags))
> goto next_item;
>
> - /*
> - * Note that iop_push may unlock and reacquire the AIL lock. We
> - * rely on the AIL cursor implementation to be able to deal with
> - * the dropped lock.
> - *
> - * The log item may have been freed by the push, so it must not
> - * be accessed or dereferenced below this line.
> - */
> - lock_result = xfsaild_push_item(ailp, lip);
> - switch (lock_result) {
> - case XFS_ITEM_SUCCESS:
> - XFS_STATS_INC(mp, xs_push_ail_success);
> - trace_xfs_ail_push(ailp, type, flags, item_lsn);
> -
> - ailp->ail_last_pushed_lsn = item_lsn;
> - break;
> -
> - case XFS_ITEM_FLUSHING:
> - /*
> - * The item or its backing buffer is already being
> - * flushed. The typical reason for that is that an
> - * inode buffer is locked because we already pushed the
> - * updates to it as part of inode clustering.
> - *
> - * We do not want to stop flushing just because lots
> - * of items are already being flushed, but we need to
> - * re-try the flushing relatively soon if most of the
> - * AIL is being flushed.
> - */
> - XFS_STATS_INC(mp, xs_push_ail_flushing);
> - trace_xfs_ail_flushing(ailp, type, flags, item_lsn);
> -
> - flushing++;
> - ailp->ail_last_pushed_lsn = item_lsn;
> - break;
> -
> - case XFS_ITEM_PINNED:
> - XFS_STATS_INC(mp, xs_push_ail_pinned);
> - trace_xfs_ail_pinned(ailp, type, flags, item_lsn);
> -
> - stuck++;
> - ailp->ail_log_flush++;
> - break;
> - case XFS_ITEM_LOCKED:
> - XFS_STATS_INC(mp, xs_push_ail_locked);
> - trace_xfs_ail_locked(ailp, type, flags, item_lsn);
> -
> - stuck++;
> - break;
> - default:
> - ASSERT(0);
> - break;
> - }
> -
> + xfsaild_process_logitem(ailp, lip, &stuck, &flushing);
> count++;
>
> /*
> --
> 2.50.1
>
>
>
>
> Amazon Web Services EMEA SARL, 38 avenue John F. Kennedy, L-1855 Luxembourg, R.C.S. Luxembourg B186284
>
> Amazon Web Services EMEA SARL, Irish Branch, One Burlington Plaza, Burlington Road, Dublin 4, Ireland, branch registration number 908705
>
>
>
>
© 2016 - 2026 Red Hat, Inc.