In some functions, removing the cleanup paths simplifies the overall
code, so replace the cleanup paths with guard()s.
Signed-off-by: Gladyshev Ilya <foxido@foxido.dev>
---
fs/btrfs/extent-io-tree.c | 21 ++++-----
fs/btrfs/extent-tree.c | 96 ++++++++++++++++-----------------------
fs/btrfs/ordered-data.c | 46 ++++++++-----------
3 files changed, 68 insertions(+), 95 deletions(-)
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index 69ea2bd359a6..88d7aed7055f 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -890,9 +890,8 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
struct extent_state **cached_state)
{
struct extent_state *state;
- bool ret = false;
- spin_lock(&tree->lock);
+ guard(spinlock)(&tree->lock);
if (cached_state && *cached_state) {
state = *cached_state;
if (state->end == start - 1 && extent_state_in_tree(state)) {
@@ -911,23 +910,21 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
*cached_state = NULL;
if (state)
goto got_it;
- goto out;
+ return false;
}
btrfs_free_extent_state(*cached_state);
*cached_state = NULL;
}
state = find_first_extent_bit_state(tree, start, bits);
+ if (!state)
+ return false;
+
got_it:
- if (state) {
- cache_state_if_flags(state, cached_state, 0);
- *start_ret = state->start;
- *end_ret = state->end;
- ret = true;
- }
-out:
- spin_unlock(&tree->lock);
- return ret;
+ cache_state_if_flags(state, cached_state, 0);
+ *start_ret = state->start;
+ *end_ret = state->end;
+ return true;
}
/*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f9744e456c6c..cb3d61d96e66 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1878,16 +1878,14 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
* and then re-check to make sure nobody got added.
*/
spin_unlock(&head->lock);
- spin_lock(&delayed_refs->lock);
- spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
- spin_unlock(&head->lock);
- spin_unlock(&delayed_refs->lock);
- return 1;
+ {
+ guard(spinlock)(&delayed_refs->lock);
+ guard(spinlock)(&head->lock);
+
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op)
+ return 1;
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
}
- btrfs_delete_ref_head(fs_info, delayed_refs, head);
- spin_unlock(&head->lock);
- spin_unlock(&delayed_refs->lock);
if (head->must_insert_reserved) {
btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
@@ -3391,30 +3389,29 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
- if (!head)
- goto out_delayed_unlock;
-
- spin_lock(&head->lock);
- if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
- goto out;
+ {
+ guard(spinlock)(&delayed_refs->lock);
+ head = btrfs_find_delayed_ref_head(fs_info, delayed_refs, bytenr);
+ if (!head)
+ return 0;
- if (cleanup_extent_op(head) != NULL)
- goto out;
+ guard(spinlock)(&head->lock);
+ if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
+ return 0;
- /*
- * waiting for the lock here would deadlock. If someone else has it
- * locked they are already in the process of dropping it anyway
- */
- if (!mutex_trylock(&head->mutex))
- goto out;
+ if (cleanup_extent_op(head) != NULL)
+ return 0;
- btrfs_delete_ref_head(fs_info, delayed_refs, head);
- head->processing = false;
+ /*
+ * waiting for the lock here would deadlock. If someone else has it
+ * locked they are already in the process of dropping it anyway
+ */
+ if (!mutex_trylock(&head->mutex))
+ return 0;
- spin_unlock(&head->lock);
- spin_unlock(&delayed_refs->lock);
+ btrfs_delete_ref_head(fs_info, delayed_refs, head);
+ head->processing = false;
+ }
BUG_ON(head->extent_op);
if (head->must_insert_reserved)
@@ -3424,12 +3421,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
-out:
- spin_unlock(&head->lock);
-
-out_delayed_unlock:
- spin_unlock(&delayed_refs->lock);
- return 0;
}
int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
@@ -3910,13 +3901,13 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
*/
}
- spin_lock(&space_info->lock);
- spin_lock(&block_group->lock);
- spin_lock(&fs_info->treelog_bg_lock);
- spin_lock(&fs_info->relocation_bg_lock);
+ guard(spinlock)(&space_info->lock);
+ guard(spinlock)(&block_group->lock);
+ guard(spinlock)(&fs_info->treelog_bg_lock);
+ guard(spinlock)(&fs_info->relocation_bg_lock);
if (ret)
- goto out;
+ goto err;
ASSERT(!ffe_ctl->for_treelog ||
block_group->start == fs_info->treelog_bg ||
@@ -3928,8 +3919,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
if (block_group->ro ||
(!ffe_ctl->for_data_reloc &&
test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
- ret = 1;
- goto out;
+ goto err;
}
/*
@@ -3938,8 +3928,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
*/
if (ffe_ctl->for_treelog && !fs_info->treelog_bg &&
(block_group->used || block_group->reserved)) {
- ret = 1;
- goto out;
+ goto err;
}
/*
@@ -3948,8 +3937,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
*/
if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg &&
(block_group->used || block_group->reserved)) {
- ret = 1;
- goto out;
+ goto err;
}
WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity);
@@ -3963,8 +3951,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
ffe_ctl->max_extent_size = avail;
ffe_ctl->total_free_space = avail;
}
- ret = 1;
- goto out;
+ goto err;
}
if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
@@ -4003,17 +3990,14 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
*/
ffe_ctl->search_start = ffe_ctl->found_offset;
+ return 0;
-out:
- if (ret && ffe_ctl->for_treelog)
+err:
+ if (ffe_ctl->for_treelog)
fs_info->treelog_bg = 0;
- if (ret && ffe_ctl->for_data_reloc)
+ if (ffe_ctl->for_data_reloc)
fs_info->data_reloc_bg = 0;
- spin_unlock(&fs_info->relocation_bg_lock);
- spin_unlock(&fs_info->treelog_bg_lock);
- spin_unlock(&block_group->lock);
- spin_unlock(&space_info->lock);
- return ret;
+ return 1;
}
static int do_allocation(struct btrfs_block_group *block_group,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 451b60de4550..4dbec4ef4ffd 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -995,35 +995,29 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&inode->ordered_tree_lock);
+ guard(spinlock_irq)(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node) {
node = ordered_tree_search(inode, file_offset + len);
if (!node)
- goto out;
+ return NULL;
}
while (1) {
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
- if (btrfs_range_overlaps(entry, file_offset, len))
- break;
+ if (btrfs_range_overlaps(entry, file_offset, len)) {
+ refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_range(inode, entry);
+ return entry;
+ }
if (entry->file_offset >= file_offset + len) {
- entry = NULL;
- break;
+ return NULL;
}
- entry = NULL;
node = rb_next(node);
if (!node)
- break;
+ return NULL;
}
-out:
- if (entry) {
- refcount_inc(&entry->refs);
- trace_btrfs_ordered_extent_lookup_range(inode, entry);
- }
- spin_unlock_irq(&inode->ordered_tree_lock);
- return entry;
}
/*
@@ -1092,7 +1086,7 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&inode->ordered_tree_lock);
+ guard(spinlock_irq)(&inode->ordered_tree_lock);
node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
@@ -1112,12 +1106,12 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
* Direct hit, got an ordered extent that starts at
* @file_offset
*/
- goto out;
+ goto ret_entry;
}
}
if (!entry) {
/* Empty tree */
- goto out;
+ return NULL;
}
cur = &entry->rb_node;
@@ -1132,22 +1126,20 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
if (prev) {
entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
if (btrfs_range_overlaps(entry, file_offset, len))
- goto out;
+ goto ret_entry;
}
if (next) {
entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
if (btrfs_range_overlaps(entry, file_offset, len))
- goto out;
+ goto ret_entry;
}
/* No ordered extent in the range */
- entry = NULL;
-out:
- if (entry) {
- refcount_inc(&entry->refs);
- trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
- }
+ return NULL;
+
+ret_entry:
+ refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
- spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
--
2.51.1.dirty
On 2025/11/13 05:19, Gladyshev Ilya wrote:
> In some functions, removing the cleanup paths simplifies the overall
> code, so replace the cleanup paths with guard()s.
>
> Signed-off-by: Gladyshev Ilya <foxido@foxido.dev>
> ---
> fs/btrfs/extent-io-tree.c | 21 ++++-----
> fs/btrfs/extent-tree.c | 96 ++++++++++++++++-----------------------
> fs/btrfs/ordered-data.c | 46 ++++++++-----------
> 3 files changed, 68 insertions(+), 95 deletions(-)
>
> diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
> index 69ea2bd359a6..88d7aed7055f 100644
> --- a/fs/btrfs/extent-io-tree.c
> +++ b/fs/btrfs/extent-io-tree.c
> @@ -890,9 +890,8 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
> struct extent_state **cached_state)
> {
> struct extent_state *state;
> - bool ret = false;
>
> - spin_lock(&tree->lock);
> + guard(spinlock)(&tree->lock);
> if (cached_state && *cached_state) {
> state = *cached_state;
> if (state->end == start - 1 && extent_state_in_tree(state)) {
> @@ -911,23 +910,21 @@ bool btrfs_find_first_extent_bit(struct extent_io_tree *tree, u64 start,
> *cached_state = NULL;
> if (state)
> goto got_it;
> - goto out;
> + return false;
> }
> btrfs_free_extent_state(*cached_state);
> *cached_state = NULL;
> }
>
> state = find_first_extent_bit_state(tree, start, bits);
> + if (!state)
> + return false;
> +
> got_it:
> - if (state) {
> - cache_state_if_flags(state, cached_state, 0);
> - *start_ret = state->start;
> - *end_ret = state->end;
> - ret = true;
> - }
> -out:
> - spin_unlock(&tree->lock);
> - return ret;
> + cache_state_if_flags(state, cached_state, 0);
> + *start_ret = state->start;
> + *end_ret = state->end;
> + return true;
> }
>
> /*
> diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
> index f9744e456c6c..cb3d61d96e66 100644
> --- a/fs/btrfs/extent-tree.c
> +++ b/fs/btrfs/extent-tree.c
> @@ -1878,16 +1878,14 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
> * and then re-check to make sure nobody got added.
> */
> spin_unlock(&head->lock);
> - spin_lock(&delayed_refs->lock);
> - spin_lock(&head->lock);
> - if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
> - spin_unlock(&head->lock);
> - spin_unlock(&delayed_refs->lock);
> - return 1;
> + {
There is some internal discussion about such anonymous code block usage.
Although I support such usage, especially when it can reduce the
lifespan of local variables, it's not a commonly accepted pattern yet.
Thanks,
Qu
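[For readers less familiar with the construct: guard(spinlock)(&lock)
declares a local object that takes the lock on the spot and whose cleanup
handler drops it when the object goes out of scope, which is why a bare
block bounds the locked region and why early returns need no explicit
unlock. Below is a minimal userspace sketch of that mechanism, built on
the compiler's cleanup attribute rather than the kernel's cleanup.h;
mutex_guard, guard_mutex() and bump_if_even() are invented names for
illustration only.]

/*
 * Userspace sketch only: mutex_guard and guard_mutex() are invented names.
 * The kernel's guard() in include/linux/cleanup.h is built on the same
 * cleanup attribute, but generates unique variable names per use and takes
 * its lock/unlock pair from the guard definitions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

struct mutex_guard { pthread_mutex_t *m; };

static inline struct mutex_guard mutex_guard_ctor(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return (struct mutex_guard){ .m = m };
}

static inline void mutex_guard_dtor(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->m);
}

/* Take the lock now, release it automatically when the scope is left. */
#define guard_mutex(m)							\
	struct mutex_guard _guard					\
		__attribute__((cleanup(mutex_guard_dtor))) = mutex_guard_ctor(m)

static int bump_if_even(void)
{
	{	/* the bare block bounds the locked region */
		guard_mutex(&lock);
		if (counter & 1)
			return -1;	/* unlock still runs on this early return */
		counter += 2;
	}	/* unlock runs here */
	return counter;
}

int main(void)
{
	printf("%d\n", bump_if_even());	/* prints 2 */
	printf("%d\n", bump_if_even());	/* prints 4 */
	return 0;
}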
On Thu, Nov 13, 2025 at 07:20:01AM +1030, Qu Wenruo wrote:
> > --- a/fs/btrfs/extent-tree.c
> > +++ b/fs/btrfs/extent-tree.c
> > @@ -1878,16 +1878,14 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
> > * and then re-check to make sure nobody got added.
> > */
> > spin_unlock(&head->lock);
> > - spin_lock(&delayed_refs->lock);
> > - spin_lock(&head->lock);
> > - if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
> > - spin_unlock(&head->lock);
> > - spin_unlock(&delayed_refs->lock);
> > - return 1;
> > + {
>
> There is some internal discussion about such anonymous code block usage.
>
> Although I support such usage, especially when it can reduce the
> lifespan of local variables, it's not a commonly accepted pattern yet.
And the discussion is going great; I think we won't find a consensus
without somebody either missing a coding pattern (you) or suffering from
looking at such code each time (me). Others have similar mixed feelings
about the use of guards.
On Thu, 13 Nov 2025 at 09:59, David Sterba <dsterba@suse.cz> wrote:
>
> On Thu, Nov 13, 2025 at 07:20:01AM +1030, Qu Wenruo wrote:
> > > --- a/fs/btrfs/extent-tree.c
> > > +++ b/fs/btrfs/extent-tree.c
> > > @@ -1878,16 +1878,14 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
> > > * and then re-check to make sure nobody got added.
> > > */
> > > spin_unlock(&head->lock);
> > > - spin_lock(&delayed_refs->lock);
> > > - spin_lock(&head->lock);
> > > - if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
> > > - spin_unlock(&head->lock);
> > > - spin_unlock(&delayed_refs->lock);
> > > - return 1;
> > > + {
> >
> > There is some internal discussion about such anonymous code block usage.
> >
> > Although I support such usage, especially when it can reduce the
> > lifespan of local variables, it's not a commonly accepted pattern yet.
>
> And the discussion is going great; I think we won't find a consensus
> without somebody either missing a coding pattern (you) or suffering from
> looking at such code each time (me). Others have similar mixed feelings
> about the use of guards.
And yet I can imagine even wilder creativity like:
> + scoped_guard(spinlock, &delayed_refs->lock)
> + scoped_guard(spinlock, &head->lock) {
> + if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op)
> + return 1;
> + btrfs_delete_ref_head(fs_info, delayed_refs, head);
> }
Here the indentation is irregular, but still looks kind of justifiable. Would
we be happy with such exceptions?
Otherwise this could end up rather mixed and that does not look
preferable, at least to me:
> + scoped_guard(spinlock, &delayed_refs->lock) {
> + guard(spinlock)(&head->lock);
> + if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op)
> + return 1;
> + btrfs_delete_ref_head(fs_info, delayed_refs, head);
> }
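[As an aside, scoped_guard() gets its block-shaped form from a
single-iteration for loop that owns the guard object, which is why the
brace-less nesting in the first variant above parses the same way nested
if statements do. A minimal userspace sketch of that trick, again using
the plain cleanup attribute instead of the kernel's cleanup.h;
mutex_guard, scoped_guard_mutex() and read_value() are invented names.]

/*
 * Userspace sketch only: the real scoped_guard() in include/linux/cleanup.h
 * differs in detail (unique identifiers, conditional guards, ...).  The
 * point is just the single-iteration for loop that gives the guard a block
 * of its own.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
static int value = 41;

struct mutex_guard { pthread_mutex_t *m; };

static inline struct mutex_guard mutex_guard_ctor(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return (struct mutex_guard){ .m = m };
}

static inline void mutex_guard_dtor(struct mutex_guard *g)
{
	pthread_mutex_unlock(g->m);
}

/*
 * The guard and a run-once flag live in the for-init; the attached
 * statement (braced or not) is the locked scope.
 */
#define scoped_guard_mutex(m)						\
	for (struct mutex_guard _scope					\
		     __attribute__((cleanup(mutex_guard_dtor))) =	\
		     mutex_guard_ctor(m), *_done = NULL;		\
	     !_done; _done = &_scope)

static int read_value(void)
{
	int v = 0;

	/* Brace-less nesting, as in the first variant above. */
	scoped_guard_mutex(&lock_a)
		scoped_guard_mutex(&lock_b) {
			if (value < 0)
				return -1;	/* both unlocks still run */
			v = value + 1;
		}
	return v;	/* both locks already dropped */
}

int main(void)
{
	printf("%d\n", read_value());	/* prints 42 */
	return 0;
}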