From: Youling Tang <tangyouling@kylinos.cn>
- Reduces bkey_err() calls.
- Avoid redundant calls to bch2_trans_iter_exit() in some functions.
Signed-off-by: Youling Tang <tangyouling@kylinos.cn>
---
fs/bcachefs/alloc_background.c | 9 +++++----
fs/bcachefs/btree_iter.h | 5 ++---
fs/bcachefs/fsck.c | 5 +++--
fs/bcachefs/inode.c | 4 ++--
fs/bcachefs/io_write.c | 2 +-
fs/bcachefs/lru.c | 2 +-
fs/bcachefs/move.c | 4 +++-
fs/bcachefs/movinggc.c | 3 ++-
fs/bcachefs/quota.c | 3 ++-
fs/bcachefs/subvolume.c | 5 +++--
10 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index fd3a2522bc3e..9ac5c0a62041 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -460,7 +460,7 @@ bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_i
BTREE_ITER_intent);
int ret = bkey_err(k);
if (unlikely(ret))
- return ERR_PTR(ret);
+ goto err;
struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
ret = PTR_ERR_OR_ZERO(a);
@@ -693,7 +693,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
BTREE_ITER_intent);
ret = bkey_err(old);
if (ret)
- return ret;
+ goto err;
if (ca->mi.freespace_initialized &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
@@ -738,7 +738,7 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
BTREE_ITER_with_updates);
ret = bkey_err(k);
if (ret)
- return ret;
+ goto out;
if (k.k->type != KEY_TYPE_bucket_gens) {
bkey_bucket_gens_init(&g->k_i);
@@ -750,6 +750,7 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
g->v.gens[offset] = gen;
ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
+out:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -1373,7 +1374,7 @@ static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_tran
alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
ret = bkey_err(alloc_k);
if (ret)
- return ret;
+ goto out;
if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
trans, need_discard_freespace_key_to_invalid_dev_bucket,
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index dca62375d7d3..7fbc5a796c4c 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -552,8 +552,7 @@ static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
- if (unlikely(bkey_err(k)))
- bch2_trans_iter_exit(trans, iter);
+
return k;
}
@@ -586,9 +585,9 @@ static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
memcpy(val, k.v, b);
if (unlikely(b < sizeof(*val)))
memset((void *) val + b, 0, sizeof(*val) - b);
- bch2_trans_iter_exit(trans, &iter);
}
+ bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 9138944c5ae6..83a343f1e186 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -930,7 +930,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c i
struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
int ret = bkey_err(d);
if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
+ goto err;
if (fsck_err_on(ret,
trans, inode_points_to_missing_dirent,
@@ -955,6 +955,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c i
}
ret = 0;
+err:
fsck_err:
bch2_trans_iter_exit(trans, &dirent_iter);
printbuf_exit(&buf);
@@ -1971,7 +1972,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
0, subvolume);
ret = bkey_err(s.s_c);
if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
+ goto out;
if (ret) {
if (fsck_err(trans, dirent_to_missing_subvol,
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 2be6be33afa3..6cb4028f6f60 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -343,7 +343,7 @@ int bch2_inode_peek_nowarn(struct btree_trans *trans,
flags|BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
- return ret;
+ goto err;
ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
if (ret)
@@ -1081,7 +1081,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
- return ret;
+ goto out;
ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
if (fsck_err_on(!bkey_is_inode(k.k),
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 1d4761d15002..3ba374b47f4b 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -219,7 +219,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
BTREE_ITER_cached);
int ret = bkey_err(k);
if (unlikely(ret))
- return ret;
+ goto err;
/*
* varint_decode_fast(), in the inode .invalid method, reads up to 7
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index 96f2f4f8c397..40bc732e996f 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -91,7 +91,7 @@ int bch2_lru_check_set(struct btree_trans *trans,
time), 0);
int ret = bkey_err(lru_k);
if (ret)
- return ret;
+ goto err;
if (lru_k.k->type != KEY_TYPE_set) {
ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k, last_flushed);
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 7d3920e03742..4148fc312ab2 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -439,8 +439,10 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
BTREE_ITER_cached);
ret = bkey_err(k);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ bch2_trans_iter_exit(trans, &iter);
return ret;
+ }
if (!ret && bkey_is_inode(k.k)) {
struct bch_inode_unpacked inode;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index deef4f024d20..7ee2e74eb7fc 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -88,7 +88,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
b->k.bucket, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
- return ret;
+ goto out;
a = bch2_alloc_to_v4(k, &_a);
b->k.gen = a->gen;
@@ -98,6 +98,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
a->fragmentation_lru &&
a->fragmentation_lru <= time;
+out:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index c32a05e252e2..504d39a9da65 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -828,7 +828,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
BTREE_ITER_slots|BTREE_ITER_intent);
ret = bkey_err(k);
if (unlikely(ret))
- return ret;
+ goto err;
if (k.k->type == KEY_TYPE_quota)
new_quota->v = *bkey_s_c_to_quota(k).v;
@@ -844,6 +844,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
+err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index dbe834cb349f..c5358f452fc1 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -419,12 +419,13 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
"missing subvolume %u", subvolid);
if (ret)
- return ret;
+ goto err;
snapid = le32_to_cpu(subvol.v->snapshot);
ret = bch2_btree_delete_at(trans, &iter, 0) ?:
bch2_snapshot_node_set_deleted(trans, snapid);
+err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -648,7 +649,7 @@ static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
ret = bkey_err(k);
if (ret)
- return ret;
+ goto err;
if (!bkey_is_inode(k.k)) {
bch_err(trans->c, "root inode not found");
--
2.34.1
On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
> From: Youling Tang <tangyouling@kylinos.cn>
>
> - Reduces bkey_err() calls.
> - Avoid redundant calls to bch2_trans_iter_exit() in some functions.

no, a function that returns an error should clean up after itself
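
For readers skimming the thread, the convention Kent is pointing to is the one the btree_iter.h hunk above removes: the getter that initialized the iterator also exits it on its own error paths, so a caller that sees an error can simply return. A minimal sketch of that shape follows; get_key_example() is a made-up name and its internals (bch2_trans_iter_init(), bch2_btree_iter_peek_slot()) are assumptions chosen for illustration, not the real __bch2_bkey_get_iter() body.

/*
 * Illustrative sketch only: the function that set up the iterator
 * tears it down on failure, so callers never inherit cleanup work
 * from a failed call. The internal helpers used here are assumptions.
 */
static struct bkey_s_c get_key_example(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_id btree, struct bpos pos,
				       unsigned flags)
{
	bch2_trans_iter_init(trans, iter, btree, pos, flags);

	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);	/* clean up after ourselves */
	return k;
}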
On 23/08/2024 11:55, Kent Overstreet wrote:
> On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
>> From: Youling Tang <tangyouling@kylinos.cn>
>>
>> - Reduces bkey_err() calls.
>> - Avoid redundant calls to bch2_trans_iter_exit() in some functions.
> no, a function that returns an error should clean up after itself

Yes, functions should self-clean when they fail.

However, there are repeated calls to bch2_trans_iter_exit in
some functions, take lookup_inode() as an example,

When bkey_err(k) returns a non-zero, call bch2_trans_iter_exit()
once in bch2_bkey_get_iter(). It is then called again in
lookup_inode() via 'goto err'. (We can correct it by simply changing
it to 'return ret', but there are many similar cases.)

Thanks,
Youling.
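
To make the redundancy Youling describes concrete, here is a hand-written caller sketch based on his description (a hypothetical lookup_inode()-style function, not the actual source). The assumption, inferred from the fact that today's code works, is that calling bch2_trans_iter_exit() a second time on an iterator the getter already exited is harmless, merely wasted work.

/*
 * Hypothetical caller, sketched from the description above; not
 * verbatim bcachefs source.
 */
static int lookup_inode_sketch(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
					       pos, BTREE_ITER_cached);
	int ret = bkey_err(k);
	if (ret)
		goto err;	/* the getter already exited 'iter' on this path */

	/* ... unpack and use the inode here ... */
err:
	bch2_trans_iter_exit(trans, &iter);	/* redundant (but assumed harmless) after a failed get */
	return ret;
}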
On Fri, Aug 23, 2024 at 02:07:20PM GMT, Youling Tang wrote:
> On 23/08/2024 11:55, Kent Overstreet wrote:
> > On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
> > > From: Youling Tang <tangyouling@kylinos.cn>
> > >
> > > - Reduces bkey_err() calls.
> > > - Avoid redundant calls to bch2_trans_iter_exit() in some functions.
> > no, a function that returns an error should clean up after itself
> Yes, functions should self-clean when they fail.
>
> However, there are repeated calls to bch2_trans_iter_exit in
> some functions, take lookup_inode() as an example,
>
> When bkey_err(k) returns a non-zero, call bch2_trans_iter_exit()
> once in bch2_bkey_get_iter(). It is then called again in
> lookup_inode() via 'goto err'. (We can correct it by simply changing
> it to 'return ret', but there are many similar cases.)

I'm aware, but I'm not looking to microoptimize at the expense of making
the code more fragile and less clear, especially right now when the
priority is stabilizing and fixing bugs.

If you were also doing performance testing and could show that it
makes a measurable difference I'd consider it. Did you even look at the
assembly output for any of these functions? CSE might be optimizing away
the redundant calls.
On 2024/8/23 22:51, Kent Overstreet wrote:

> On Fri, Aug 23, 2024 at 02:07:20PM GMT, Youling Tang wrote:
>> On 23/08/2024 11:55, Kent Overstreet wrote:
>>> On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
>>>> From: Youling Tang <tangyouling@kylinos.cn>
>>>>
>>>> - Reduces bkey_err() calls.
>>>> - Avoid redundant calls to bch2_trans_iter_exit() in some functions.
>>> no, a function that returns an error should clean up after itself
>> Yes, functions should self-clean when they fail.
>>
>> However, there are repeated calls to bch2_trans_iter_exit in
>> some functions, take lookup_inode() as an example,
>>
>> When bkey_err(k) returns a non-zero, call bch2_trans_iter_exit()
>> once in bch2_bkey_get_iter(). It is then called again in
>> lookup_inode() via 'goto err'. (We can correct it by simply changing
>> it to 'return ret', but there are many similar cases.)
> I'm aware, but I'm not looking to microoptimize at the expense of making
> the code more fragile and less clear, especially right now when the
> priority is stabilizing and fixing bugs.
>
> If you were also doing performance testing and could show that it
> makes a measurable difference I'd consider it. Did you even look at the
> assembly output for any of these functions? CSE might be optimizing away
> the redundant calls.

I haven't performed the corresponding performance testing. Looking at
the assembly code, taking `lookup_inode()` as an example,

Before the patch,
  142f: 74 96                   je     13c7 <lookup_inode+0x117>
  1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
  1438: 4c 89 e7                mov    %r12,%rdi
  143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
  1440: eb b4                   jmp    13f6 <lookup_inode+0x146>
  1442: e8 00 00 00 00          call   1447 <lookup_inode+0x197>
  1447: 66 0f 1f 84 00 00 00    nopw   0x0(%rax,%rax,1)
  144e: 00 00

After,
  111f: 74 96                   je     10b7 <lookup_inode+0x117>
  1121: eb c3                   jmp    10e6 <lookup_inode+0x146>
  1123: e8 00 00 00 00          call   1128 <lookup_inode+0x188>
  1128: 0f 1f 84 00 00 00 00    nopl   0x0(%rax,%rax,1)
  112f: 00

The following three assembly instructions have been reduced,
  1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
  1438: 4c 89 e7                mov    %r12,%rdi
  143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
On 2024/8/30 9:19, Youling Tang wrote:
> On 2024/8/23 22:51, Kent Overstreet wrote:
>
>> On Fri, Aug 23, 2024 at 02:07:20PM GMT, Youling Tang wrote:
>>> On 23/08/2024 11:55, Kent Overstreet wrote:
>>>> On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
>>>>> From: Youling Tang <tangyouling@kylinos.cn>
>>>>>
>>>>> - Reduces bkey_err() calls.
>>>>> - Avoid redundant calls to bch2_trans_iter_exit() in some functions.
>>>> no, a function that returns an error should clean up after itself
>>> Yes, functions should self-clean when they fail.
>>>
>>> However, there are repeated calls to bch2_trans_iter_exit in
>>> some functions, take lookup_inode() as an example,
>>>
>>> When bkey_err(k) returns a non-zero, call bch2_trans_iter_exit()
>>> once in bch2_bkey_get_iter(). It is then called again in
>>> lookup_inode() via 'goto err'. (We can correct it by simply changing
>>> it to 'return ret', but there are many similar cases.)
>> I'm aware, but I'm not looking to microoptimize at the expense of making
>> the code more fragile and less clear, especially right now when the
>> priority is stabilizing and fixing bugs.
>>
>> If you were also doing performance testing and could show that it
>> makes a measurable difference I'd consider it. Did you even look at the
>> assembly output for any of these functions? CSE might be optimizing away
>> the redundant calls.
> I haven't performed the corresponding performance testing. Looking at
> the assembly code, taking `lookup_inode()` as an example,
>
I think his point is that your modification is error-free, but improving
readability is not a high priority at the moment unless it actually
affects performance. There might currently be a shortage of manpower,
with many features to be completed and bugs to be fixed.
> Before the patch,
>   142f: 74 96                   je     13c7 <lookup_inode+0x117>
>   1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
>   1438: 4c 89 e7                mov    %r12,%rdi
>   143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
>   1440: eb b4                   jmp    13f6 <lookup_inode+0x146>
>   1442: e8 00 00 00 00          call   1447 <lookup_inode+0x197>
>   1447: 66 0f 1f 84 00 00 00    nopw   0x0(%rax,%rax,1)
>   144e: 00 00
>
> After,
>   111f: 74 96                   je     10b7 <lookup_inode+0x117>
>   1121: eb c3                   jmp    10e6 <lookup_inode+0x146>
>   1123: e8 00 00 00 00          call   1128 <lookup_inode+0x188>
>   1128: 0f 1f 84 00 00 00 00    nopl   0x0(%rax,%rax,1)
>   112f: 00
>
> The following three assembly instructions have been reduced,
>   1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
>   1438: 4c 89 e7                mov    %r12,%rdi
>   143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
>
On 2024/8/30 11:48, Hongbo Li wrote:
>
>
> On 2024/8/30 9:19, Youling Tang wrote:
>> On 2024/8/23 22:51, Kent Overstreet wrote:
>>
>>> On Fri, Aug 23, 2024 at 02:07:20PM GMT, Youling Tang wrote:
>>>> On 23/08/2024 11:55, Kent Overstreet wrote:
>>>>> On Fri, Aug 23, 2024 at 11:19:55AM GMT, Youling Tang wrote:
>>>>>> From: Youling Tang <tangyouling@kylinos.cn>
>>>>>>
>>>>>> - Reduces bkey_err() calls.
>>>>>> - Avoid redundant calls to bch2_trans_iter_exit() in some functions.
>>>>> no, a function that returns an error should clean up after itself
>>>> Yes, functions should self-clean when they fail.
>>>>
>>>> However, there are repeated calls to bch2_trans_iter_exit in
>>>> some functions, take lookup_inode() as an example,
>>>>

This is indeed the case. Personally, I think the bcachefs code is quite
difficult to understand (such as too many macros, etc.), with many parts
that could be simplified. I think maybe only Kent himself can fully grasp it.

>>>> When bkey_err(k) returns a non-zero, call bch2_trans_iter_exit()
>>>> once in bch2_bkey_get_iter(). It is then called again in
>>>> lookup_inode() via 'goto err'. (We can correct it by simply changing
>>>> it to 'return ret', but there are many similar cases.)
>>> I'm aware, but I'm not looking to microoptimize at the expense of making
>>> the code more fragile and less clear, especially right now when the
>>> priority is stabilizing and fixing bugs.
>>>
>>> If you were also doing performance testing and could show that it
>>> makes a measurable difference I'd consider it. Did you even look at the
>>> assembly output for any of these functions? CSE might be optimizing away
>>> the redundant calls.
>> I haven't performed the corresponding performance testing. Looking at
>> the assembly code, taking `lookup_inode()` as an example,
>>
> I think his point is that your modification is error-free, but improving
> readability is not a high priority at the moment unless it actually
> affects performance. There might currently be a shortage of manpower,
> with many features to be completed and bugs to be fixed.
>> Before the patch,
>>   142f: 74 96                   je     13c7 <lookup_inode+0x117>
>>   1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
>>   1438: 4c 89 e7                mov    %r12,%rdi
>>   143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
>>   1440: eb b4                   jmp    13f6 <lookup_inode+0x146>
>>   1442: e8 00 00 00 00          call   1447 <lookup_inode+0x197>
>>   1447: 66 0f 1f 84 00 00 00    nopw   0x0(%rax,%rax,1)
>>   144e: 00 00
>>
>> After,
>>   111f: 74 96                   je     10b7 <lookup_inode+0x117>
>>   1121: eb c3                   jmp    10e6 <lookup_inode+0x146>
>>   1123: e8 00 00 00 00          call   1128 <lookup_inode+0x188>
>>   1128: 0f 1f 84 00 00 00 00    nopl   0x0(%rax,%rax,1)
>>   112f: 00
>>
>> The following three assembly instructions have been reduced,
>>   1431: 48 8d b5 68 ff ff ff    lea    -0x98(%rbp),%rsi
>>   1438: 4c 89 e7                mov    %r12,%rdi
>>   143b: e8 00 00 00 00          call   1440 <lookup_inode+0x190>
>>