Let's say we want to allocate 2 blocks starting from logical block
4294966386. After predicting the file size, start is aligned down to
4294965248 and len is changed to 2048, so end = start + size =
0x100000000. Since end is of type ext4_lblk_t, i.e. a 32-bit unsigned
int, end is truncated to 0.

This causes (pa->pa_lstart >= end) to always hold when checking whether
the extent to be allocated crosses already preallocated blocks, so the
resulting ac_g_ex may overlap already preallocated blocks.

Hence we convert end to loff_t and use pa_end() to avoid the overflow.

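For illustration, here is a minimal userspace sketch (not kernel code: the
start/len constants come from the scenario above, the preallocation start
is a hypothetical example, and int64_t stands in for loff_t) showing how
the 32-bit end misses the overlap while a 64-bit end catches it:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t ext4_lblk_t;	/* logical block numbers are 32-bit unsigned */

int main(void)
{
	ext4_lblk_t start = 4294965248u;	/* aligned start from the example */
	ext4_lblk_t size = 2048;		/* normalized length */
	ext4_lblk_t pa_lstart = 4294966272u;	/* hypothetical PA inside [start, start + size) */

	ext4_lblk_t end32 = start + size;	/* 0x100000000 wraps around to 0 */
	int64_t end64 = (int64_t)start + size;	/* 64-bit sum keeps 0x100000000 */

	printf("end32 = %u, end64 = %lld\n", end32, (long long)end64);

	/* With the truncated end, "PA starts at or after end" always holds,
	 * so the overlapping PA is never noticed. */
	printf("pa_lstart >= end32: %d\n", pa_lstart >= end32);	/* prints 1 */

	/* With a 64-bit end the overlap is detected as expected. */
	printf("pa_lstart >= end64: %d\n", pa_lstart >= end64);	/* prints 0 */

	return 0;
}

The patch achieves the same effect by computing end in loff_t throughout
ext4_mb_normalize_request() and the overlap helpers below.
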
Signed-off-by: Baokun Li <libaokun1@huawei.com>
---
fs/ext4/mballoc.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 2090e5e7ba58..77d47af525d9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4222,12 +4222,13 @@ ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_

static inline void
ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t start, ext4_lblk_t end)
+ ext4_lblk_t start, loff_t end)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_prealloc_space *tmp_pa;
- ext4_lblk_t tmp_pa_start, tmp_pa_end;
+ ext4_lblk_t tmp_pa_start;
+ loff_t tmp_pa_end;
struct rb_node *iter;

read_lock(&ei->i_prealloc_lock);
@@ -4236,7 +4237,7 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_end(sbi, tmp_pa);

spin_lock(&tmp_pa->pa_lock);
if (tmp_pa->pa_deleted == 0)
@@ -4258,14 +4259,14 @@ ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac,
*/
static inline void
ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
- ext4_lblk_t *start, ext4_lblk_t *end)
+ ext4_lblk_t *start, loff_t *end)
{
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL;
struct rb_node *iter;
- ext4_lblk_t new_start, new_end;
- ext4_lblk_t tmp_pa_start, tmp_pa_end, left_pa_end = -1, right_pa_start = -1;
+ ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1;
+ loff_t new_end, tmp_pa_end, left_pa_end = -1;

new_start = *start;
new_end = *end;
@@ -4284,7 +4285,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
tmp_pa = rb_entry(iter, struct ext4_prealloc_space,
pa_node.inode_node);
tmp_pa_start = tmp_pa->pa_lstart;
- tmp_pa_end = tmp_pa->pa_lstart + EXT4_C2B(sbi, tmp_pa->pa_len);
+ tmp_pa_end = pa_end(sbi, tmp_pa);

/* PA must not overlap original request */
spin_lock(&tmp_pa->pa_lock);
@@ -4364,8 +4365,7 @@ ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac,
}

if (left_pa) {
- left_pa_end =
- left_pa->pa_lstart + EXT4_C2B(sbi, left_pa->pa_len);
+ left_pa_end = pa_end(sbi, left_pa);
BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical);
}

@@ -4404,8 +4404,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
- ext4_lblk_t end;
- loff_t size, start_off;
+ loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;

--
2.31.1
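Note that pa_end() itself is not defined in this diff; it is introduced
elsewhere in the series. Judging from the open-coded expression it
replaces, it presumably computes the exclusive logical end of a
preallocation as a loff_t, roughly along these lines (a sketch only,
assuming the usual ext4 types and the EXT4_C2B() macro):

static inline loff_t pa_end(struct ext4_sb_info *sbi,
			    struct ext4_prealloc_space *pa)
{
	/* Cast first so the addition is done in 64 bits and cannot wrap. */
	return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
}
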
Baokun Li <libaokun1@huawei.com> writes:
> Let's say we want to allocate 2 blocks starting from logical block
> 4294966386. After predicting the file size, start is aligned down to
> 4294965248 and len is changed to 2048, so end = start + size =
> 0x100000000. Since end is of type ext4_lblk_t, i.e. a 32-bit unsigned
> int, end is truncated to 0.
>
> This causes (pa->pa_lstart >= end) to always hold when checking whether
> the extent to be allocated crosses already preallocated blocks, so the
> resulting ac_g_ex may overlap already preallocated blocks.
>
> Hence we convert end to loff_t and use pa_end() to avoid the overflow.
>
> Signed-off-by: Baokun Li <libaokun1@huawei.com>
Thanks again for spotting. Looks good to me (with pa_end() dropped).
-ritesh