When fewer pages are read, nr_pages may be smaller than nr_cpages. Due
to the nr_vecs limit, the compressed pages will be split into multiple
bios and then merged at the block level. In this case, nr_cpages should
be used to pre-allocate bvecs.

To handle this case, align max_nr_pages to cluster_size, which should be
enough for all compressed pages.
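
For illustration, with hypothetical numbers (not part of the change): assume
cluster_size = 16 and a readahead of nr_pages = 4 starting at index = 10. The
request touches a single cluster, but decompressing that cluster may need a
bvec for each of its compressed blocks, so max_nr_pages is widened to the
cluster boundaries:

	/* hypothetical values: cluster_size = 16, index = 10, nr_pages = 4 */
	max_nr_pages = round_up(10 + 4, 16) - round_down(10, 16);
		     /* = 16 - 0 = 16: one full cluster, which is always
		      * >= nr_cpages of that cluster */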
Signed-off-by: Jianan Huang <huangjianan@xiaomi.com>
Signed-off-by: Sheng Yong <shengyong1@xiaomi.com>
---
Changes since v1:
- Use aligned nr_pages instead of nr_cpages to pre-allocate bvecs.
fs/f2fs/data.c | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 31e892842625..2d948586fea0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2303,7 +2303,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
}
if (!bio) {
- bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
+ bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
f2fs_ra_op_flags(rac),
folio->index, for_write);
if (IS_ERR(bio)) {
@@ -2370,12 +2370,18 @@ static int f2fs_mpage_readpages(struct inode *inode,
.nr_cpages = 0,
};
pgoff_t nc_cluster_idx = NULL_CLUSTER;
- pgoff_t index;
+ pgoff_t index = rac ? readahead_index(rac) : folio->index;
#endif
unsigned nr_pages = rac ? readahead_count(rac) : 1;
unsigned max_nr_pages = nr_pages;
int ret = 0;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (f2fs_compressed_file(inode))
+ max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
+ round_down(index, cc.cluster_size);
+#endif
+
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
@@ -2385,7 +2391,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
map.m_seg_type = NO_CHECK_TYPE;
map.m_may_create = false;
- for (; nr_pages; nr_pages--) {
+ for (; nr_pages; nr_pages--, max_nr_pages--) {
if (rac) {
folio = readahead_folio(rac);
prefetchw(&folio->flags);
--
2.43.0
On 6/18/25 16:17, Jianan Huang wrote:
> When fewer pages are read, nr_pages may be smaller than nr_cpages. Due
> to the nr_vecs limit, the compressed pages will be split into multiple
> bios and then merged at the block level. In this case, nr_cpages should
> be used to pre-allocate bvecs.
> To handle this case, align max_nr_pages to cluster_size, which should be
> enough for all compressed pages.
>
> Signed-off-by: Jianan Huang <huangjianan@xiaomi.com>
> Signed-off-by: Sheng Yong <shengyong1@xiaomi.com>
> ---
> Changes since v1:
> - Use aligned nr_pages instead of nr_cpages to pre-allocate bvecs.
>
> fs/f2fs/data.c | 12 +++++++++---
> 1 file changed, 9 insertions(+), 3 deletions(-)
>
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index 31e892842625..2d948586fea0 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2303,7 +2303,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
> }
>
> if (!bio) {
> - bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
> + bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages - i,
> f2fs_ra_op_flags(rac),
> folio->index, for_write);
> if (IS_ERR(bio)) {
> @@ -2370,12 +2370,18 @@ static int f2fs_mpage_readpages(struct inode *inode,
> .nr_cpages = 0,
> };
> pgoff_t nc_cluster_idx = NULL_CLUSTER;
> - pgoff_t index;
> + pgoff_t index = rac ? readahead_index(rac) : folio->index;
For non-compressed files, it's redundant.
> #endif
> unsigned nr_pages = rac ? readahead_count(rac) : 1;
> unsigned max_nr_pages = nr_pages;
> int ret = 0;
>
> +#ifdef CONFIG_F2FS_FS_COMPRESSION
> + if (f2fs_compressed_file(inode))
index = rac ? readahead_index(rac) : folio->index;
Thanks,
> + max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
> + round_down(index, cc.cluster_size);
> +#endif
> +
> map.m_pblk = 0;
> map.m_lblk = 0;
> map.m_len = 0;
> @@ -2385,7 +2391,7 @@ static int f2fs_mpage_readpages(struct inode *inode,
> map.m_seg_type = NO_CHECK_TYPE;
> map.m_may_create = false;
>
> - for (; nr_pages; nr_pages--) {
> + for (; nr_pages; nr_pages--, max_nr_pages--) {
> if (rac) {
> folio = readahead_folio(rac);
> prefetchw(&folio->flags);
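
In other words, the suggestion seems to be to keep the declaration bare and
assign index only on the compressed path. A minimal, untested sketch of that
(interpreting the comment above, not taken from a posted patch):

	pgoff_t nc_cluster_idx = NULL_CLUSTER;
	pgoff_t index;
#endif
	...
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		index = rac ? readahead_index(rac) : folio->index;
		max_nr_pages = round_up(index + nr_pages, cc.cluster_size) -
				round_down(index, cc.cluster_size);
	}
#endif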
On 2025/6/18 16:17, Jianan Huang wrote:
ping~