fs/ntfs3/frecord.c | 43 ++++++++++++++++++++++++-------------------
1 file changed, 24 insertions(+), 19 deletions(-)
ntfs_lock_new_page() currently returns a struct page * but it primarily
operates on folios via __filemap_get_folio(). Convert it to return a
struct folio * and use folio_alloc() + __folio_set_locked() for the
temporary folio used to avoid data corruption during decompression.
When the cached folio is not uptodate, preserve the existing behavior by
using folio_file_page() and converting the returned page back to a
folio.
Update ni_readpage_cmpr() and ni_decompress_file() to handle the new
return type while keeping the existing struct page * array and the
unlock_page()/put_page() cleanup paths unchanged.
Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com>
---
v2:
- Fix build failure in ni_decompress_file() by switching the local
pointer to struct folio * (reported by kernel test robot).
- Reported-by: kernel test robot <lkp@intel.com>
- Closes: https://lore.kernel.org/oe-kbuild-all/202602072013.jwrURE2e-lkp@intel.com/
- Closes: https://lore.kernel.org/oe-kbuild-all/202602071921.nGIiI1J5-lkp@intel.com/
fs/ntfs3/frecord.c | 43 ++++++++++++++++++++++++-------------------
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 641ddaf8d4a0..6169257cf784 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -2022,27 +2022,31 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
return err;
}
-static struct page *ntfs_lock_new_page(struct address_space *mapping,
- pgoff_t index, gfp_t gfp)
+static struct folio *ntfs_lock_new_page(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
{
struct folio *folio = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
- struct page *page;
if (IS_ERR(folio))
- return ERR_CAST(folio);
+ return folio;
- if (!folio_test_uptodate(folio))
- return folio_file_page(folio, index);
+ if (!folio_test_uptodate(folio)) {
+ struct page *page = folio_file_page(folio, index);
+
+ if (IS_ERR(page))
+ return ERR_CAST(page);
+ return page_folio(page);
+ }
/* Use a temporary page to avoid data corruption */
folio_unlock(folio);
folio_put(folio);
- page = alloc_page(gfp);
- if (!page)
+ folio = folio_alloc(gfp, 0);
+ if (!folio)
return ERR_PTR(-ENOMEM);
- __SetPageLocked(page);
- return page;
+ __folio_set_locked(folio);
+ return folio;
}
/*
@@ -2064,6 +2068,7 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
u32 i, idx, frame_size, pages_per_frame;
gfp_t gfp_mask;
struct page *pg;
+ struct folio *f;
if (vbo >= i_size_read(&ni->vfs_inode)) {
folio_zero_range(folio, 0, folio_size(folio));
@@ -2099,12 +2104,12 @@ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio)
if (i == idx)
continue;
- pg = ntfs_lock_new_page(mapping, index, gfp_mask);
- if (IS_ERR(pg)) {
- err = PTR_ERR(pg);
+ f = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(f)) {
+ err = PTR_ERR(f);
goto out1;
}
- pages[i] = pg;
+ pages[i] = &f->page;
}
err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame, 0);
@@ -2190,18 +2195,18 @@ int ni_decompress_file(struct ntfs_inode *ni)
}
for (i = 0; i < pages_per_frame; i++, index++) {
- struct page *pg;
+ struct folio *f;
- pg = ntfs_lock_new_page(mapping, index, gfp_mask);
- if (IS_ERR(pg)) {
+ f = ntfs_lock_new_page(mapping, index, gfp_mask);
+ if (IS_ERR(f)) {
while (i--) {
unlock_page(pages[i]);
put_page(pages[i]);
}
- err = PTR_ERR(pg);
+ err = PTR_ERR(f);
goto out;
}
- pages[i] = pg;
+ pages[i] = &f->page;
}
err = ni_read_frame(ni, vbo, pages, pages_per_frame, 1);
base-commit: 2687c848e57820651b9f69d30c4710f4219f7dbf
--
2.43.0
© 2016 - 2026 Red Hat, Inc.