Converted the function to use folios. This is in preparation for the
removal of find_get_pages_range_tag().
Also modified f2fs_all_cluster_page_ready to take in a folio_batch instead
of pagevec. This does NOT support large folios. The function currently
only utilizes folios of size 1 so this shouldn't cause any issues right
now.
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
fs/f2fs/compress.c | 13 ++++-----
fs/f2fs/data.c | 67 +++++++++++++++++++++++++---------------------
fs/f2fs/f2fs.h | 5 ++--
3 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 70e97075e535..e1bd2e859f64 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -841,10 +841,11 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
-bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
- int index, int nr_pages, bool uptodate)
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc,
+ struct folio_batch *fbatch,
+ int index, int nr_folios, bool uptodate)
{
- unsigned long pgidx = pages[index]->index;
+ unsigned long pgidx = fbatch->folios[index]->index;
int i = uptodate ? 0 : 1;
/*
@@ -854,13 +855,13 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
if (uptodate && (pgidx % cc->cluster_size))
return false;
- if (nr_pages - index < cc->cluster_size)
+ if (nr_folios - index < cc->cluster_size)
return false;
for (; i < cc->cluster_size; i++) {
- if (pages[index + i]->index != pgidx + i)
+ if (fbatch->folios[index + i]->index != pgidx + i)
return false;
- if (uptodate && !PageUptodate(pages[index + i]))
+ if (uptodate && !folio_test_uptodate(fbatch->folios[index + i]))
return false;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aa3ccddfa037..f87b9644b10b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2917,7 +2917,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0, retry = 0;
- struct page *pages[F2FS_ONSTACK_PAGES];
+ struct folio_batch fbatch;
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
struct bio *bio = NULL;
sector_t last_block;
@@ -2938,7 +2938,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
.private = NULL,
};
#endif
- int nr_pages;
+ int nr_folios;
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
@@ -2948,6 +2948,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int submitted = 0;
int i;
+ folio_batch_init(&fbatch);
+
if (get_dirty_pages(mapping->host) <=
SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -2973,13 +2975,13 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !retry && (index <= end)) {
- nr_pages = find_get_pages_range_tag(mapping, &index, end,
- tag, F2FS_ONSTACK_PAGES, pages);
- if (nr_pages == 0)
+ nr_folios = filemap_get_folios_tag(mapping, &index, end,
+ tag, &fbatch);
+ if (nr_folios == 0)
break;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pages[i];
+ for (i = 0; i < nr_folios; i++) {
+ struct folio *folio = fbatch.folios[i];
bool need_readd;
readd:
need_readd = false;
@@ -2996,7 +2998,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
}
if (!f2fs_cluster_can_merge_page(&cc,
- page->index)) {
+ folio->index)) {
ret = f2fs_write_multi_pages(&cc,
&submitted, wbc, io_type);
if (!ret)
@@ -3005,27 +3007,28 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
}
if (unlikely(f2fs_cp_error(sbi)))
- goto lock_page;
+ goto lock_folio;
if (!f2fs_cluster_is_empty(&cc))
- goto lock_page;
+ goto lock_folio;
if (f2fs_all_cluster_page_ready(&cc,
- pages, i, nr_pages, true))
+				&fbatch, i, nr_folios, true))
-				goto lock_page;
+				goto lock_folio;
ret2 = f2fs_prepare_compress_overwrite(
inode, &pagep,
- page->index, &fsdata);
+ folio->index, &fsdata);
if (ret2 < 0) {
ret = ret2;
done = 1;
break;
} else if (ret2 &&
(!f2fs_compress_write_end(inode,
- fsdata, page->index, 1) ||
+ fsdata, folio->index, 1) ||
!f2fs_all_cluster_page_ready(&cc,
- pages, i, nr_pages, false))) {
+ &fbatch, i, nr_folios,
+ false))) {
retry = 1;
break;
}
@@ -3038,46 +3041,47 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
break;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
-lock_page:
+lock_folio:
#endif
- done_index = page->index;
+ done_index = folio->index;
retry_write:
- lock_page(page);
+ folio_lock(folio);
- if (unlikely(page->mapping != mapping)) {
+ if (unlikely(folio->mapping != mapping)) {
continue_unlock:
- unlock_page(page);
+ folio_unlock(folio);
continue;
}
- if (!PageDirty(page)) {
+ if (!folio_test_dirty(folio)) {
/* someone wrote it for us */
goto continue_unlock;
}
- if (PageWriteback(page)) {
+ if (folio_test_writeback(folio)) {
if (wbc->sync_mode != WB_SYNC_NONE)
- f2fs_wait_on_page_writeback(page,
+ f2fs_wait_on_page_writeback(
+ &folio->page,
DATA, true, true);
else
goto continue_unlock;
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
goto continue_unlock;
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
- get_page(page);
- f2fs_compress_ctx_add_page(&cc, page);
+ folio_get(folio);
+ f2fs_compress_ctx_add_page(&cc, &folio->page);
continue;
}
#endif
- ret = f2fs_write_single_data_page(page, &submitted,
- &bio, &last_block, wbc, io_type,
- 0, true);
+ ret = f2fs_write_single_data_page(&folio->page,
+ &submitted, &bio, &last_block,
+ wbc, io_type, 0, true);
if (ret == AOP_WRITEPAGE_ACTIVATE)
- unlock_page(page);
+ folio_unlock(folio);
#ifdef CONFIG_F2FS_FS_COMPRESSION
result:
#endif
@@ -3101,7 +3105,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
}
goto next;
}
- done_index = page->index + 1;
+ done_index = folio->index +
+ folio_nr_pages(folio);
done = 1;
break;
}
@@ -3115,7 +3120,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (need_readd)
goto readd;
}
- release_pages(pages, nr_pages);
+ folio_batch_release(&fbatch);
cond_resched();
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3c7cdb70fe2e..dcb28240f724 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -4196,8 +4196,9 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
-bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
- int index, int nr_pages, bool uptodate);
+bool f2fs_all_cluster_page_ready(struct compress_ctx *cc,
+ struct folio_batch *fbatch, int index, int nr_folios,
+ bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
--
2.36.1
Hi Vishal,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on jaegeuk-f2fs/dev-test]
[also build test ERROR on kdave/for-next linus/master v6.0-rc3]
[cannot apply to ceph-client/for-linus next-20220901]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Vishal-Moola-Oracle/Convert-to-filemap_get_folios_tag/20220902-060430
base: https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git dev-test
config: arc-randconfig-r043-20220901 (https://download.01.org/0day-ci/archive/20220903/202209030512.9yAy8edt-lkp@intel.com/config)
compiler: arc-elf-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/6c74320953cd3749db95f9f09c1fc7d044933635
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Vishal-Moola-Oracle/Convert-to-filemap_get_folios_tag/20220902-060430
git checkout 6c74320953cd3749db95f9f09c1fc7d044933635
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=arc SHELL=/bin/bash fs/
If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
fs/f2fs/data.c: In function 'f2fs_write_cache_pages':
>> fs/f2fs/data.c:3016:53: error: 'nr_pages' undeclared (first use in this function); did you mean 'dir_pages'?
3016 | &fbatch, i, nr_pages, true))
| ^~~~~~~~
| dir_pages
fs/f2fs/data.c:3016:53: note: each undeclared identifier is reported only once for each function it appears in
>> fs/f2fs/data.c:3017:41: error: label 'lock_page' used but not defined
3017 | goto lock_page;
| ^~~~
vim +3016 fs/f2fs/data.c
2908
2909 /*
2910 * This function was copied from write_cche_pages from mm/page-writeback.c.
2911 * The major change is making write step of cold data page separately from
2912 * warm/hot data page.
2913 */
2914 static int f2fs_write_cache_pages(struct address_space *mapping,
2915 struct writeback_control *wbc,
2916 enum iostat_type io_type)
2917 {
2918 int ret = 0;
2919 int done = 0, retry = 0;
2920 struct folio_batch fbatch;
2921 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2922 struct bio *bio = NULL;
2923 sector_t last_block;
2924 #ifdef CONFIG_F2FS_FS_COMPRESSION
2925 struct inode *inode = mapping->host;
2926 struct compress_ctx cc = {
2927 .inode = inode,
2928 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2929 .cluster_size = F2FS_I(inode)->i_cluster_size,
2930 .cluster_idx = NULL_CLUSTER,
2931 .rpages = NULL,
2932 .nr_rpages = 0,
2933 .cpages = NULL,
2934 .valid_nr_cpages = 0,
2935 .rbuf = NULL,
2936 .cbuf = NULL,
2937 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2938 .private = NULL,
2939 };
2940 #endif
2941 int nr_folios;
2942 pgoff_t index;
2943 pgoff_t end; /* Inclusive */
2944 pgoff_t done_index;
2945 int range_whole = 0;
2946 xa_mark_t tag;
2947 int nwritten = 0;
2948 int submitted = 0;
2949 int i;
2950
2951 folio_batch_init(&fbatch);
2952
2953 if (get_dirty_pages(mapping->host) <=
2954 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2955 set_inode_flag(mapping->host, FI_HOT_DATA);
2956 else
2957 clear_inode_flag(mapping->host, FI_HOT_DATA);
2958
2959 if (wbc->range_cyclic) {
2960 index = mapping->writeback_index; /* prev offset */
2961 end = -1;
2962 } else {
2963 index = wbc->range_start >> PAGE_SHIFT;
2964 end = wbc->range_end >> PAGE_SHIFT;
2965 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2966 range_whole = 1;
2967 }
2968 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2969 tag = PAGECACHE_TAG_TOWRITE;
2970 else
2971 tag = PAGECACHE_TAG_DIRTY;
2972 retry:
2973 retry = 0;
2974 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2975 tag_pages_for_writeback(mapping, index, end);
2976 done_index = index;
2977 while (!done && !retry && (index <= end)) {
2978 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2979 tag, &fbatch);
2980 if (nr_folios == 0)
2981 break;
2982
2983 for (i = 0; i < nr_folios; i++) {
2984 struct folio *folio = fbatch.folios[i];
2985 bool need_readd;
2986 readd:
2987 need_readd = false;
2988 #ifdef CONFIG_F2FS_FS_COMPRESSION
2989 if (f2fs_compressed_file(inode)) {
2990 void *fsdata = NULL;
2991 struct page *pagep;
2992 int ret2;
2993
2994 ret = f2fs_init_compress_ctx(&cc);
2995 if (ret) {
2996 done = 1;
2997 break;
2998 }
2999
3000 if (!f2fs_cluster_can_merge_page(&cc,
3001 folio->index)) {
3002 ret = f2fs_write_multi_pages(&cc,
3003 &submitted, wbc, io_type);
3004 if (!ret)
3005 need_readd = true;
3006 goto result;
3007 }
3008
3009 if (unlikely(f2fs_cp_error(sbi)))
3010 goto lock_folio;
3011
3012 if (!f2fs_cluster_is_empty(&cc))
3013 goto lock_folio;
3014
3015 if (f2fs_all_cluster_page_ready(&cc,
> 3016 &fbatch, i, nr_pages, true))
> 3017 goto lock_page;
3018
3019 ret2 = f2fs_prepare_compress_overwrite(
3020 inode, &pagep,
3021 folio->index, &fsdata);
3022 if (ret2 < 0) {
3023 ret = ret2;
3024 done = 1;
3025 break;
3026 } else if (ret2 &&
3027 (!f2fs_compress_write_end(inode,
3028 fsdata, folio->index, 1) ||
3029 !f2fs_all_cluster_page_ready(&cc,
3030 &fbatch, i, nr_folios,
3031 false))) {
3032 retry = 1;
3033 break;
3034 }
3035 }
3036 #endif
3037 /* give a priority to WB_SYNC threads */
3038 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3039 wbc->sync_mode == WB_SYNC_NONE) {
3040 done = 1;
3041 break;
3042 }
3043 #ifdef CONFIG_F2FS_FS_COMPRESSION
3044 lock_folio:
3045 #endif
3046 done_index = folio->index;
3047 retry_write:
3048 folio_lock(folio);
3049
3050 if (unlikely(folio->mapping != mapping)) {
3051 continue_unlock:
3052 folio_unlock(folio);
3053 continue;
3054 }
3055
3056 if (!folio_test_dirty(folio)) {
3057 /* someone wrote it for us */
3058 goto continue_unlock;
3059 }
3060
3061 if (folio_test_writeback(folio)) {
3062 if (wbc->sync_mode != WB_SYNC_NONE)
3063 f2fs_wait_on_page_writeback(
3064 &folio->page,
3065 DATA, true, true);
3066 else
3067 goto continue_unlock;
3068 }
3069
3070 if (!folio_clear_dirty_for_io(folio))
3071 goto continue_unlock;
3072
--
0-DAY CI Kernel Test Service
https://01.org/lkp
Hi Vishal,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on jaegeuk-f2fs/dev-test]
[also build test ERROR on kdave/for-next linus/master v6.0-rc3]
[cannot apply to ceph-client/for-linus next-20220901]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Vishal-Moola-Oracle/Convert-to-filemap_get_folios_tag/20220902-060430
base: https://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git dev-test
config: hexagon-randconfig-r045-20220901 (https://download.01.org/0day-ci/archive/20220903/202209030346.t02z8VfY-lkp@intel.com/config)
compiler: clang version 16.0.0 (https://github.com/llvm/llvm-project c55b41d5199d2394dd6cdb8f52180d8b81d809d4)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/6c74320953cd3749db95f9f09c1fc7d044933635
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Vishal-Moola-Oracle/Convert-to-filemap_get_folios_tag/20220902-060430
git checkout 6c74320953cd3749db95f9f09c1fc7d044933635
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=hexagon SHELL=/bin/bash fs/f2fs/
If you fix the issue, kindly add following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
>> fs/f2fs/data.c:3016:18: error: use of undeclared identifier 'nr_pages'; did you mean 'dir_pages'?
&fbatch, i, nr_pages, true))
^~~~~~~~
dir_pages
include/linux/pagemap.h:1404:29: note: 'dir_pages' declared here
static inline unsigned long dir_pages(struct inode *inode)
^
>> fs/f2fs/data.c:3017:11: error: use of undeclared label 'lock_page'
goto lock_page;
^
2 errors generated.
vim +3016 fs/f2fs/data.c
2908
2909 /*
2910 * This function was copied from write_cche_pages from mm/page-writeback.c.
2911 * The major change is making write step of cold data page separately from
2912 * warm/hot data page.
2913 */
2914 static int f2fs_write_cache_pages(struct address_space *mapping,
2915 struct writeback_control *wbc,
2916 enum iostat_type io_type)
2917 {
2918 int ret = 0;
2919 int done = 0, retry = 0;
2920 struct folio_batch fbatch;
2921 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2922 struct bio *bio = NULL;
2923 sector_t last_block;
2924 #ifdef CONFIG_F2FS_FS_COMPRESSION
2925 struct inode *inode = mapping->host;
2926 struct compress_ctx cc = {
2927 .inode = inode,
2928 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2929 .cluster_size = F2FS_I(inode)->i_cluster_size,
2930 .cluster_idx = NULL_CLUSTER,
2931 .rpages = NULL,
2932 .nr_rpages = 0,
2933 .cpages = NULL,
2934 .valid_nr_cpages = 0,
2935 .rbuf = NULL,
2936 .cbuf = NULL,
2937 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2938 .private = NULL,
2939 };
2940 #endif
2941 int nr_folios;
2942 pgoff_t index;
2943 pgoff_t end; /* Inclusive */
2944 pgoff_t done_index;
2945 int range_whole = 0;
2946 xa_mark_t tag;
2947 int nwritten = 0;
2948 int submitted = 0;
2949 int i;
2950
2951 folio_batch_init(&fbatch);
2952
2953 if (get_dirty_pages(mapping->host) <=
2954 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2955 set_inode_flag(mapping->host, FI_HOT_DATA);
2956 else
2957 clear_inode_flag(mapping->host, FI_HOT_DATA);
2958
2959 if (wbc->range_cyclic) {
2960 index = mapping->writeback_index; /* prev offset */
2961 end = -1;
2962 } else {
2963 index = wbc->range_start >> PAGE_SHIFT;
2964 end = wbc->range_end >> PAGE_SHIFT;
2965 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2966 range_whole = 1;
2967 }
2968 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2969 tag = PAGECACHE_TAG_TOWRITE;
2970 else
2971 tag = PAGECACHE_TAG_DIRTY;
2972 retry:
2973 retry = 0;
2974 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2975 tag_pages_for_writeback(mapping, index, end);
2976 done_index = index;
2977 while (!done && !retry && (index <= end)) {
2978 nr_folios = filemap_get_folios_tag(mapping, &index, end,
2979 tag, &fbatch);
2980 if (nr_folios == 0)
2981 break;
2982
2983 for (i = 0; i < nr_folios; i++) {
2984 struct folio *folio = fbatch.folios[i];
2985 bool need_readd;
2986 readd:
2987 need_readd = false;
2988 #ifdef CONFIG_F2FS_FS_COMPRESSION
2989 if (f2fs_compressed_file(inode)) {
2990 void *fsdata = NULL;
2991 struct page *pagep;
2992 int ret2;
2993
2994 ret = f2fs_init_compress_ctx(&cc);
2995 if (ret) {
2996 done = 1;
2997 break;
2998 }
2999
3000 if (!f2fs_cluster_can_merge_page(&cc,
3001 folio->index)) {
3002 ret = f2fs_write_multi_pages(&cc,
3003 &submitted, wbc, io_type);
3004 if (!ret)
3005 need_readd = true;
3006 goto result;
3007 }
3008
3009 if (unlikely(f2fs_cp_error(sbi)))
3010 goto lock_folio;
3011
3012 if (!f2fs_cluster_is_empty(&cc))
3013 goto lock_folio;
3014
3015 if (f2fs_all_cluster_page_ready(&cc,
> 3016 &fbatch, i, nr_pages, true))
> 3017 goto lock_page;
3018
3019 ret2 = f2fs_prepare_compress_overwrite(
3020 inode, &pagep,
3021 folio->index, &fsdata);
3022 if (ret2 < 0) {
3023 ret = ret2;
3024 done = 1;
3025 break;
3026 } else if (ret2 &&
3027 (!f2fs_compress_write_end(inode,
3028 fsdata, folio->index, 1) ||
3029 !f2fs_all_cluster_page_ready(&cc,
3030 &fbatch, i, nr_folios,
3031 false))) {
3032 retry = 1;
3033 break;
3034 }
3035 }
3036 #endif
3037 /* give a priority to WB_SYNC threads */
3038 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3039 wbc->sync_mode == WB_SYNC_NONE) {
3040 done = 1;
3041 break;
3042 }
3043 #ifdef CONFIG_F2FS_FS_COMPRESSION
3044 lock_folio:
3045 #endif
3046 done_index = folio->index;
3047 retry_write:
3048 folio_lock(folio);
3049
3050 if (unlikely(folio->mapping != mapping)) {
3051 continue_unlock:
3052 folio_unlock(folio);
3053 continue;
3054 }
3055
3056 if (!folio_test_dirty(folio)) {
3057 /* someone wrote it for us */
3058 goto continue_unlock;
3059 }
3060
3061 if (folio_test_writeback(folio)) {
3062 if (wbc->sync_mode != WB_SYNC_NONE)
3063 f2fs_wait_on_page_writeback(
3064 &folio->page,
3065 DATA, true, true);
3066 else
3067 goto continue_unlock;
3068 }
3069
3070 if (!folio_clear_dirty_for_io(folio))
3071 goto continue_unlock;
3072
--
0-DAY CI Kernel Test Service
https://01.org/lkp
© 2016 - 2026 Red Hat, Inc.