This patch adds a sysfs entry to change the folio order for readahead.
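The knob is exposed per device under /sys/fs/f2fs/<disk>/ra_folio_order and accepts values from 0 (the default) up to MAX_PAGECACHE_ORDER. A minimal usage sketch, with sda1 as an example device name:

  # ask readahead to use order-4 folios (64KiB with 4KiB pages)
  echo 4 > /sys/fs/f2fs/sda1/ra_folio_order
  # restore the default order-0 behavior
  echo 0 > /sys/fs/f2fs/sda1/ra_folio_order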
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
---
fs/f2fs/data.c | 1 +
fs/f2fs/f2fs.h | 3 +++
fs/f2fs/super.c | 1 +
fs/f2fs/sysfs.c | 9 +++++++++
4 files changed, 14 insertions(+)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index c80d7960b652..faf1faa27c41 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2764,6 +2764,7 @@ int f2fs_readahead_pages(struct file *file, loff_t offset, loff_t len)
while (nrpages) {
unsigned long this_chunk = min(nrpages, ra_pages);

+ ractl.ra->desired_order = F2FS_I_SB(inode)->ra_folio_order;
ractl.ra->ra_pages = this_chunk;

page_cache_sync_ra(&ractl, this_chunk << 1);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 934287cc5624..0e61e253d861 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1921,6 +1921,9 @@ struct f2fs_sb_info {
/* carve out reserved_blocks from total blocks */
bool carve_out;

+ /* folio order used for readahead */
+ unsigned int ra_folio_order;
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index d47ec718f3be..dabac6f288f0 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -4287,6 +4287,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
NAT_ENTRY_PER_BLOCK));
sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
+ sbi->ra_folio_order = 0;
F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index c42f4f979d13..2537a25986a6 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -906,6 +906,13 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
return count;
}

+ if (!strcmp(a->attr.name, "ra_folio_order")) {
+ if (t < 0 || t > MAX_PAGECACHE_ORDER)
+ return -EINVAL;
+ sbi->ra_folio_order = t;
+ return count;
+ }
+
*ui = (unsigned int)t;

return count;
@@ -1180,6 +1187,7 @@ F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
F2FS_SBI_GENERAL_RW_ATTR(dir_level);
F2FS_SBI_GENERAL_RW_ATTR(allocate_section_hint);
F2FS_SBI_GENERAL_RW_ATTR(allocate_section_policy);
+F2FS_SBI_GENERAL_RW_ATTR(ra_folio_order);
#ifdef CONFIG_F2FS_IOSTAT
F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
F2FS_SBI_GENERAL_RW_ATTR(iostat_period_ms);
@@ -1422,6 +1430,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(reserved_pin_section),
ATTR_LIST(allocate_section_hint),
ATTR_LIST(allocate_section_policy),
+ ATTR_LIST(ra_folio_order),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
--
2.52.0.487.g5c8c507ade-goog
On Fri, Nov 21, 2025 at 01:48:46AM +0000, Jaegeuk Kim wrote:
> This patch adds a sysfs entry to change the folio order for readahead.
You'll need to explain why this is useful. And why this is f2fs
specific instead of generic.
>
> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
> ---
> fs/f2fs/data.c | 1 +
> fs/f2fs/f2fs.h | 3 +++
> fs/f2fs/super.c | 1 +
> fs/f2fs/sysfs.c | 9 +++++++++
> 4 files changed, 14 insertions(+)
>
> diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
> index c80d7960b652..faf1faa27c41 100644
> --- a/fs/f2fs/data.c
> +++ b/fs/f2fs/data.c
> @@ -2764,6 +2764,7 @@ int f2fs_readahead_pages(struct file *file, loff_t offset, loff_t len)
> while (nrpages) {
> unsigned long this_chunk = min(nrpages, ra_pages);
>
> + ractl.ra->desired_order = F2FS_I_SB(inode)->ra_folio_order;
> ractl.ra->ra_pages = this_chunk;
>
> page_cache_sync_ra(&ractl, this_chunk << 1);
> diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
> index 934287cc5624..0e61e253d861 100644
> --- a/fs/f2fs/f2fs.h
> +++ b/fs/f2fs/f2fs.h
> @@ -1921,6 +1921,9 @@ struct f2fs_sb_info {
> /* carve out reserved_blocks from total blocks */
> bool carve_out;
>
> + /* folio order used for readahead */
> + unsigned int ra_folio_order;
> +
> #ifdef CONFIG_F2FS_FS_COMPRESSION
> struct kmem_cache *page_array_slab; /* page array entry */
> unsigned int page_array_slab_size; /* default page array slab size */
> diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
> index d47ec718f3be..dabac6f288f0 100644
> --- a/fs/f2fs/super.c
> +++ b/fs/f2fs/super.c
> @@ -4287,6 +4287,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
> NAT_ENTRY_PER_BLOCK));
> sbi->allocate_section_hint = le32_to_cpu(raw_super->section_count);
> sbi->allocate_section_policy = ALLOCATE_FORWARD_NOHINT;
> + sbi->ra_folio_order = 0;
> F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
> F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
> F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
> diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
> index c42f4f979d13..2537a25986a6 100644
> --- a/fs/f2fs/sysfs.c
> +++ b/fs/f2fs/sysfs.c
> @@ -906,6 +906,13 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
> return count;
> }
>
> + if (!strcmp(a->attr.name, "ra_folio_order")) {
> + if (t < 0 || t > MAX_PAGECACHE_ORDER)
> + return -EINVAL;
> + sbi->ra_folio_order = t;
> + return count;
> + }
> +
> *ui = (unsigned int)t;
>
> return count;
> @@ -1180,6 +1187,7 @@ F2FS_SBI_GENERAL_RW_ATTR(migration_window_granularity);
> F2FS_SBI_GENERAL_RW_ATTR(dir_level);
> F2FS_SBI_GENERAL_RW_ATTR(allocate_section_hint);
> F2FS_SBI_GENERAL_RW_ATTR(allocate_section_policy);
> +F2FS_SBI_GENERAL_RW_ATTR(ra_folio_order);
> #ifdef CONFIG_F2FS_IOSTAT
> F2FS_SBI_GENERAL_RW_ATTR(iostat_enable);
> F2FS_SBI_GENERAL_RW_ATTR(iostat_period_ms);
> @@ -1422,6 +1430,7 @@ static struct attribute *f2fs_attrs[] = {
> ATTR_LIST(reserved_pin_section),
> ATTR_LIST(allocate_section_hint),
> ATTR_LIST(allocate_section_policy),
> + ATTR_LIST(ra_folio_order),
> NULL,
> };
> ATTRIBUTE_GROUPS(f2fs);
> --
> 2.52.0.487.g5c8c507ade-goog
>
>
---end quoted text---