From: Zhang Yi <yi.zhang@huawei.com>
Now that large folios are supported on ext4, writing back a sufficiently
large and discontinuous folio may consume a significant number of
journal credits, placing considerable strain on the journal. For
example, in a 20GB filesystem with 1K block size and 1MB journal size,
writing back a 2MB folio could require thousands of credits in the
worst-case scenario (when each block is discontinuous and distributed
across different block groups), potentially exceeding the journal size.
This issue can also occur in ext4_write_begin() and ext4_page_mkwrite()
when delalloc is not enabled.
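As a rough back-of-the-envelope illustration (assuming the per-block
cost estimated by ext4_chunk_trans_blocks() is a few credits): with 1K
blocks, a 2MB folio spans 2048 blocks; if every block is allocated from
a different block group, each allocation can dirty its own block bitmap,
group descriptor, and extent tree blocks, so the folio can need several
thousand credits in total, while a 1MB journal holds only about 1024
1K blocks.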
Fix this by ensuring that there are sufficient journal credits before
allocating an extent in mpage_map_one_extent() and _ext4_get_block(). If
there are not enough credits, return -EAGAIN, exit the current mapping
loop, start a new handle in a new transaction, and allocate blocks for
this folio again in the next iteration.
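For reference, the caller-side retry pattern this relies on looks
roughly like the following (a simplified sketch, not the exact code;
the real retry loops are in the diff below):

        retry_journal:
                handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
                ...
                ret = ...;      /* mapping path that may end in _ext4_get_block() */
                ext4_journal_stop(handle);
                if (ret == -EAGAIN ||
                    (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)))
                        goto retry_journal;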
Suggested-by: Jan Kara <jack@suse.cz>
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
---
fs/ext4/inode.c | 45 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 39 insertions(+), 6 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d0db6e3bf158..b51de58518b2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -877,20 +877,44 @@ static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
         } while (unlikely(!try_cmpxchg(&bh->b_state, &old_state, new_state)));
 }

+/*
+ * Make sure that the current journal transaction has enough credits to map
+ * one extent. Return -EAGAIN if it cannot extend the current running
+ * transaction.
+ */
+static inline int ext4_journal_ensure_extent_credits(handle_t *handle,
+                                                     struct inode *inode)
+{
+        int needed_credits;
+        int ret;
+
+        needed_credits = ext4_chunk_trans_blocks(inode, 1);
+        ret = __ext4_journal_ensure_credits(handle, needed_credits,
+                                            needed_credits, 0);
+        return ret <= 0 ? ret : -EAGAIN;
+}
+
 static int _ext4_get_block(struct inode *inode, sector_t iblock,
                            struct buffer_head *bh, int flags)
 {
         struct ext4_map_blocks map;
+        handle_t *handle = ext4_journal_current_handle();
         int ret = 0;

         if (ext4_has_inline_data(inode))
                 return -ERANGE;

+        /* Make sure transaction has enough credits for this extent */
+        if (flags & EXT4_GET_BLOCKS_CREATE) {
+                ret = ext4_journal_ensure_extent_credits(handle, inode);
+                if (ret)
+                        return ret;
+        }
+
         map.m_lblk = iblock;
         map.m_len = bh->b_size >> inode->i_blkbits;

-        ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
-                              flags);
+        ret = ext4_map_blocks(handle, inode, &map, flags);
         if (ret > 0) {
                 map_bh(bh, inode->i_sb, map.m_pblk);
                 ext4_update_bh_state(bh, map.m_flags);
@@ -1374,8 +1398,9 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
                         ext4_orphan_del(NULL, inode);
                 }

-                if (ret == -ENOSPC &&
-                    ext4_should_retry_alloc(inode->i_sb, &retries))
+                if (ret == -EAGAIN ||
+                    (ret == -ENOSPC &&
+                     ext4_should_retry_alloc(inode->i_sb, &retries)))
                         goto retry_journal;
                 folio_put(folio);
                 return ret;
@@ -2324,6 +2349,11 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
         int get_blocks_flags;
         int err, dioread_nolock;

+        /* Make sure transaction has enough credits for this extent */
+        err = ext4_journal_ensure_extent_credits(handle, inode);
+        if (err < 0)
+                return err;
+
         trace_ext4_da_write_pages_extent(inode, map);
         /*
          * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
@@ -2446,7 +2476,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
                          * In the case of ENOSPC, if ext4_count_free_blocks()
                          * is non-zero, a commit should free up blocks.
                          */
-                        if ((err == -ENOMEM) ||
+                        if ((err == -ENOMEM) || (err == -EAGAIN) ||
                             (err == -ENOSPC && ext4_count_free_clusters(sb))) {
                                 /*
                                  * We may have already allocated extents for
@@ -2953,6 +2983,8 @@ static int ext4_do_writepages(struct mpage_da_data *mpd)
                                 ret = 0;
                                 continue;
                         }
+                        if (ret == -EAGAIN)
+                                ret = 0;
                         /* Fatal error - ENOMEM, EIO... */
                         if (ret)
                                 break;
@@ -6722,7 +6754,8 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
                 }
         }
         ext4_journal_stop(handle);
-        if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+        if (err == -EAGAIN ||
+            (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)))
                 goto retry_alloc;
 out_ret:
         ret = vmf_fs_error(err);
--
2.46.1
On Wed 11-06-25 19:16:22, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
>
> Now that large folios are supported on ext4, writing back a sufficiently
> large and discontinuous folio may consume a significant number of
> journal credits, placing considerable strain on the journal. For
> example, in a 20GB filesystem with 1K block size and 1MB journal size,
> writing back a 2MB folio could require thousands of credits in the
> worst-case scenario (when each block is discontinuous and distributed
> across different block groups), potentially exceeding the journal size.
> This issue can also occur in ext4_write_begin() and ext4_page_mkwrite()
> when delalloc is not enabled.
>
> Fix this by ensuring that there are sufficient journal credits before
> allocating an extent in mpage_map_one_extent() and _ext4_get_block(). If
> there are not enough credits, return -EAGAIN, exit the current mapping
> loop, start a new handle in a new transaction, and allocate blocks for
> this folio again in the next iteration.
>
> Suggested-by: Jan Kara <jack@suse.cz>
> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
...
> static int _ext4_get_block(struct inode *inode, sector_t iblock,
>                            struct buffer_head *bh, int flags)
> {
>         struct ext4_map_blocks map;
> +       handle_t *handle = ext4_journal_current_handle();
>         int ret = 0;
>
>         if (ext4_has_inline_data(inode))
>                 return -ERANGE;
>
> +       /* Make sure transaction has enough credits for this extent */
> +       if (flags & EXT4_GET_BLOCKS_CREATE) {
> +               ret = ext4_journal_ensure_extent_credits(handle, inode);
> +               if (ret)
> +                       return ret;
> +       }
> +
>         map.m_lblk = iblock;
>         map.m_len = bh->b_size >> inode->i_blkbits;
>
> -       ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
> -                             flags);
> +       ret = ext4_map_blocks(handle, inode, &map, flags);
Good spotting with ext4_page_mkwrite() and ext4_write_begin() also needing
this treatment! But rather than hiding the transaction extension in
_ext4_get_block(), I'd do this in ext4_block_write_begin(), where it is much
more obvious (and it is also much more obvious who needs to be prepared to
handle the EAGAIN error). Otherwise the patch looks good!
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
On 2025/6/20 0:33, Jan Kara wrote:
> On Wed 11-06-25 19:16:22, Zhang Yi wrote:
>> From: Zhang Yi <yi.zhang@huawei.com>
>>
>> Now that large folios are supported on ext4, writing back a sufficiently
>> large and discontinuous folio may consume a significant number of
>> journal credits, placing considerable strain on the journal. For
>> example, in a 20GB filesystem with 1K block size and 1MB journal size,
>> writing back a 2MB folio could require thousands of credits in the
>> worst-case scenario (when each block is discontinuous and distributed
>> across different block groups), potentially exceeding the journal size.
>> This issue can also occur in ext4_write_begin() and ext4_page_mkwrite()
>> when delalloc is not enabled.
>>
>> Fix this by ensuring that there are sufficient journal credits before
>> allocating an extent in mpage_map_one_extent() and _ext4_get_block(). If
>> there are not enough credits, return -EAGAIN, exit the current mapping
>> loop, start a new handle in a new transaction, and allocate blocks for
>> this folio again in the next iteration.
>>
>> Suggested-by: Jan Kara <jack@suse.cz>
>> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
>
> ...
>
>> static int _ext4_get_block(struct inode *inode, sector_t iblock,
>>                            struct buffer_head *bh, int flags)
>> {
>>         struct ext4_map_blocks map;
>> +       handle_t *handle = ext4_journal_current_handle();
>>         int ret = 0;
>>
>>         if (ext4_has_inline_data(inode))
>>                 return -ERANGE;
>>
>> +       /* Make sure transaction has enough credits for this extent */
>> +       if (flags & EXT4_GET_BLOCKS_CREATE) {
>> +               ret = ext4_journal_ensure_extent_credits(handle, inode);
>> +               if (ret)
>> +                       return ret;
>> +       }
>> +
>>         map.m_lblk = iblock;
>>         map.m_len = bh->b_size >> inode->i_blkbits;
>>
>> -       ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
>> -                             flags);
>> +       ret = ext4_map_blocks(handle, inode, &map, flags);
>
> Good spotting with ext4_page_mkwrite() and ext4_write_begin() also needing
> this treatment! But rather than hiding the transaction extension in
> _ext4_get_block(), I'd do this in ext4_block_write_begin(), where it is much
> more obvious (and it is also much more obvious who needs to be prepared to
> handle the EAGAIN error). Otherwise the patch looks good!
>
Yes, I completely agree with you. However, unfortunately, doing this in
ext4_block_write_begin() only works for ext4_write_begin().
ext4_page_mkwrite() does not call ext4_block_write_begin() to allocate
blocks; it calls the VFS helper __block_write_begin_int() instead.
vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
        ...
        if (!ext4_should_journal_data(inode)) {
                err = block_page_mkwrite(vma, vmf, get_block);
                ...
        }
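(For reference, the nodelalloc fault path is roughly:

        block_page_mkwrite()
          -> __block_write_begin_int(..., get_block)
            -> ext4_get_block() / ext4_get_block_unwritten()
              -> _ext4_get_block()

so the get_block callback is the only ext4-owned hook on that path.)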
So...
Thanks,
Yi.
On Fri 20-06-25 13:00:32, Zhang Yi wrote:
> On 2025/6/20 0:33, Jan Kara wrote:
> > On Wed 11-06-25 19:16:22, Zhang Yi wrote:
> >> From: Zhang Yi <yi.zhang@huawei.com>
> >>
> >> Now that large folios are supported on ext4, writing back a sufficiently
> >> large and discontinuous folio may consume a significant number of
> >> journal credits, placing considerable strain on the journal. For
> >> example, in a 20GB filesystem with 1K block size and 1MB journal size,
> >> writing back a 2MB folio could require thousands of credits in the
> >> worst-case scenario (when each block is discontinuous and distributed
> >> across different block groups), potentially exceeding the journal size.
> >> This issue can also occur in ext4_write_begin() and ext4_page_mkwrite()
> >> when delalloc is not enabled.
> >>
> >> Fix this by ensuring that there are sufficient journal credits before
> >> allocating an extent in mpage_map_one_extent() and _ext4_get_block(). If
> >> there are not enough credits, return -EAGAIN, exit the current mapping
> >> loop, start a new handle in a new transaction, and allocate blocks for
> >> this folio again in the next iteration.
> >>
> >> Suggested-by: Jan Kara <jack@suse.cz>
> >> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
> >
> > ...
> >
> >> static int _ext4_get_block(struct inode *inode, sector_t iblock,
> >>                            struct buffer_head *bh, int flags)
> >> {
> >>         struct ext4_map_blocks map;
> >> +       handle_t *handle = ext4_journal_current_handle();
> >>         int ret = 0;
> >>
> >>         if (ext4_has_inline_data(inode))
> >>                 return -ERANGE;
> >>
> >> +       /* Make sure transaction has enough credits for this extent */
> >> +       if (flags & EXT4_GET_BLOCKS_CREATE) {
> >> +               ret = ext4_journal_ensure_extent_credits(handle, inode);
> >> +               if (ret)
> >> +                       return ret;
> >> +       }
> >> +
> >>         map.m_lblk = iblock;
> >>         map.m_len = bh->b_size >> inode->i_blkbits;
> >>
> >> -       ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
> >> -                             flags);
> >> +       ret = ext4_map_blocks(handle, inode, &map, flags);
> >
> > Good spotting with ext4_page_mkwrite() and ext4_write_begin() also needing
> > this treatment! But rather than hiding the transaction extension in
> > _ext4_get_block(), I'd do this in ext4_block_write_begin(), where it is much
> > more obvious (and it is also much more obvious who needs to be prepared to
> > handle the EAGAIN error). Otherwise the patch looks good!
> >
>
> Yes, I completely agree with you. However, unfortunately, doing this in
> ext4_block_write_begin() only works for ext4_write_begin().
> ext4_page_mkwrite() does not call ext4_block_write_begin() to allocate
> blocks; it calls the VFS helper __block_write_begin_int() instead.
>
> vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
> {
>         ...
>         if (!ext4_should_journal_data(inode)) {
>                 err = block_page_mkwrite(vma, vmf, get_block);
>                 ...
>         }
>
>
> So...
Right, I forgot about the nodelalloc case. But since we do most things
by hand for data=journal mode, perhaps we could lift some code from the
data=journal path and reuse it for nodelalloc as well, like:
        folio_lock(folio);
        size = i_size_read(inode);
        /* Page got truncated from under us? */
        if (folio->mapping != mapping || folio_pos(folio) > size) {
                ret = VM_FAULT_NOPAGE;
                goto out_error;
        }

        len = folio_size(folio);
        if (folio_pos(folio) + len > size)
                len = size - folio_pos(folio);

        err = ext4_block_write_begin(handle, folio, 0, len, get_block);
        if (err)
                goto out_error;
        if (!ext4_should_journal_data(inode)) {
                block_commit_write(folio, 0, len);
                folio_mark_dirty(folio);
        } else {
                if (ext4_journal_folio_buffers(handle, folio, len)) {
                        ret = VM_FAULT_SIGBUS;
                        goto out_error;
                }
        }
        ext4_journal_stop(handle);
        folio_wait_stable(folio);
We get an additional bonus of not waiting for page writeback with a
transaction handle held (which is a potential deadlock vector). What do you
think?
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR
On 2025/6/20 22:18, Jan Kara wrote:
> On Fri 20-06-25 13:00:32, Zhang Yi wrote:
>> On 2025/6/20 0:33, Jan Kara wrote:
>>> On Wed 11-06-25 19:16:22, Zhang Yi wrote:
>>>> From: Zhang Yi <yi.zhang@huawei.com>
>>>>
>>>> Now that large folios are supported on ext4, writing back a sufficiently
>>>> large and discontinuous folio may consume a significant number of
>>>> journal credits, placing considerable strain on the journal. For
>>>> example, in a 20GB filesystem with 1K block size and 1MB journal size,
>>>> writing back a 2MB folio could require thousands of credits in the
>>>> worst-case scenario (when each block is discontinuous and distributed
>>>> across different block groups), potentially exceeding the journal size.
>>>> This issue can also occur in ext4_write_begin() and ext4_page_mkwrite()
>>>> when delalloc is not enabled.
>>>>
>>>> Fix this by ensuring that there are sufficient journal credits before
>>>> allocating an extent in mpage_map_one_extent() and _ext4_get_block(). If
>>>> there are not enough credits, return -EAGAIN, exit the current mapping
>>>> loop, start a new handle in a new transaction, and allocate blocks for
>>>> this folio again in the next iteration.
>>>>
>>>> Suggested-by: Jan Kara <jack@suse.cz>
>>>> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
>>>
>>> ...
>>>
>>>> static int _ext4_get_block(struct inode *inode, sector_t iblock,
>>>>                            struct buffer_head *bh, int flags)
>>>> {
>>>>         struct ext4_map_blocks map;
>>>> +       handle_t *handle = ext4_journal_current_handle();
>>>>         int ret = 0;
>>>>
>>>>         if (ext4_has_inline_data(inode))
>>>>                 return -ERANGE;
>>>>
>>>> +       /* Make sure transaction has enough credits for this extent */
>>>> +       if (flags & EXT4_GET_BLOCKS_CREATE) {
>>>> +               ret = ext4_journal_ensure_extent_credits(handle, inode);
>>>> +               if (ret)
>>>> +                       return ret;
>>>> +       }
>>>> +
>>>>         map.m_lblk = iblock;
>>>>         map.m_len = bh->b_size >> inode->i_blkbits;
>>>>
>>>> -       ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
>>>> -                             flags);
>>>> +       ret = ext4_map_blocks(handle, inode, &map, flags);
>>>
>>> Good spotting with ext4_page_mkwrite() and ext4_write_begin() also needing
>>> this treatment! But rather than hiding the transaction extension in
>>> _ext4_get_block(), I'd do this in ext4_block_write_begin(), where it is much
>>> more obvious (and it is also much more obvious who needs to be prepared to
>>> handle the EAGAIN error). Otherwise the patch looks good!
>>>
>>
>> Yes, I completely agree with you. However, unfortunately, doing this in
>> ext4_block_write_begin() only works for ext4_write_begin().
>> ext4_page_mkwrite() does not call ext4_block_write_begin() to allocate
>> blocks; it calls the VFS helper __block_write_begin_int() instead.
>>
>> vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
>> {
>>         ...
>>         if (!ext4_should_journal_data(inode)) {
>>                 err = block_page_mkwrite(vma, vmf, get_block);
>>                 ...
>>         }
>>
>>
>> So...
>
> Right, I forgot about the nodelalloc case. But since we do most things
> by hand for data=journal mode, perhaps we could lift some code from the
> data=journal path and reuse it for nodelalloc as well, like:
>
>         folio_lock(folio);
>         size = i_size_read(inode);
>         /* Page got truncated from under us? */
>         if (folio->mapping != mapping || folio_pos(folio) > size) {
>                 ret = VM_FAULT_NOPAGE;
>                 goto out_error;
>         }
>
>         len = folio_size(folio);
>         if (folio_pos(folio) + len > size)
>                 len = size - folio_pos(folio);
>
>         err = ext4_block_write_begin(handle, folio, 0, len, get_block);
>         if (err)
>                 goto out_error;
>         if (!ext4_should_journal_data(inode)) {
>                 block_commit_write(folio, 0, len);
>                 folio_mark_dirty(folio);
>         } else {
>                 if (ext4_journal_folio_buffers(handle, folio, len)) {
>                         ret = VM_FAULT_SIGBUS;
>                         goto out_error;
>                 }
>         }
>         ext4_journal_stop(handle);
>         folio_wait_stable(folio);
>
> We get an additional bonus of not waiting for page writeback with a
> transaction handle held (which is a potential deadlock vector). What do you
> think?
>
Yeah, this solution looks nice to me; it should work! Thank you for
the suggestion.
Best regards,
Yi.