When writing a file with direct I/O on bcachefs, performance is much
lower than on other filesystems because of writeback throttling in the
block layer:
wbt_wait+1
__rq_qos_throttle+32
blk_mq_submit_bio+394
submit_bio_noacct_nocheck+649
bch2_submit_wbio_replicas+538
__bch2_write+2539
bch2_direct_write+1663
bch2_write_iter+318
aio_write+355
io_submit_one+1224
__x64_sys_io_submit+169
do_syscall_64+134
entry_SYSCALL_64_after_hwframe+110
Set REQ_SYNC and REQ_IDLE in bio->bi_opf, as standard direct I/O does.
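
This works because the block layer's writeback throttle explicitly
exempts writes tagged with both flags. Below is a paraphrased sketch of
the check in block/blk-wbt.c, reconstructed from memory; the exact form
varies by kernel version:

static bool wbt_should_throttle(struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Writes tagged REQ_SYNC | REQ_IDLE (i.e. direct I/O)
		 * are exempt from throttling; buffered writeback is not.
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		fallthrough;
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}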
Signed-off-by: zhuxiaohui <zhuxiaohui.400@bytedance.com>
---
fs/bcachefs/fs-io-direct.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 33cb6da3a5ad..f49e6c0f0f68 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -536,7 +536,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
 		if (likely(!dio->iter.count) || dio->op.error)
 			break;
 
-		bio_reset(bio, NULL, REQ_OP_WRITE);
+		bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
 	}
 out:
 	return bch2_dio_write_done(dio);
@@ -618,7 +618,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
 
 	bio = bio_alloc_bioset(NULL,
 			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
-			       REQ_OP_WRITE,
+			       REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
 			       GFP_KERNEL,
 			       &c->dio_write_bioset);
 	dio = container_of(bio, struct dio_write, op.wbio.bio);
--
2.41.0
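
For comparison, the generic direct I/O path already tags its write bios
this way. A paraphrased excerpt from do_blockdev_direct_IO() in
fs/direct-io.c, again from memory; details vary by kernel version:

	/* Direct I/O writes are marked sync and idle up front. */
	if (iov_iter_rw(iter) == WRITE) {
		dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
		if (iocb->ki_flags & IOCB_NOWAIT)
			dio->opf |= REQ_NOWAIT;
	} else {
		dio->opf = REQ_OP_READ;
	}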
On Tue, Mar 26, 2024 at 08:03:45PM +0800, zhuxiaohui wrote:
> when writing file with direct_IO on bcachefs, then performance is
> much lower than other fs due to write back throttle in block layer:
>
> wbt_wait+1
> __rq_qos_throttle+32
> blk_mq_submit_bio+394
> submit_bio_noacct_nocheck+649
> bch2_submit_wbio_replicas+538
> __bch2_write+2539
> bch2_direct_write+1663
> bch2_write_iter+318
> aio_write+355
> io_submit_one+1224
> __x64_sys_io_submit+169
> do_syscall_64+134
> entry_SYSCALL_64_after_hwframe+110
>
> add set REQ_SYNC and REQ_IDLE in bio->bi_opf as standard dirct-io
>
> Signed-off-by: zhuxiaohui <zhuxiaohui.400@bytedance.com>

Can I get you interested in a bigger project?

The writeback throttling code is a problem; it really shouldn't be
specific to writeback, it really ought to be a general purpose
backpressure mechanism.

Unfortunately, I've found the code to be opaque, practically to the
point of obfuscation, so it's going to be a difficult nut to crack.

But the lack of higher level, more workable backpressure is a problem;
it leads to queueing delays and priority inversions in filesystem code
and below.