page_cache_ra_order() and page_cache_ra_unbounded() read mapping minimum folio
constraints before taking the invalidate lock, allowing concurrent changes to
violate page cache invariants.
Move the lookups under filemap_invalidate_lock_shared() to ensure readahead
allocations respect the mapping constraints.
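
As a rough sketch (not part of the diff below), the change amounts to
sampling the constraint only after the invalidate lock is taken, so the
value stays stable for the whole readahead pass; the helpers named here
are the ones already used in the code being patched:

	filemap_invalidate_lock_shared(mapping);
	index = mapping_align_index(mapping, index);
	/* read the constraint under the lock, not before it */
	min_nrpages = mapping_min_folio_nrpages(mapping);
	/* ... allocate and add folios of at least min_nrpages pages ... */
	filemap_invalidate_unlock_shared(mapping);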
Fixes: 47dd67532303 ("block/bdev: lift block size restrictions to 64k")
Reported-by: syzbot+4d3cc33ef7a77041efa6@syzkaller.appspotmail.com
Reported-by: syzbot+fdba5cca73fee92c69d6@syzkaller.appspotmail.com
Signed-off-by: Jinchao Wang <wangjinchao600@gmail.com>
---
mm/readahead.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/mm/readahead.c b/mm/readahead.c
index b415c9969176..74acd6c4f87c 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -214,7 +214,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
unsigned long index = readahead_index(ractl);
gfp_t gfp_mask = readahead_gfp_mask(mapping);
unsigned long mark = ULONG_MAX, i = 0;
- unsigned int min_nrpages = mapping_min_folio_nrpages(mapping);
+ unsigned int min_nrpages;
/*
* Partway through the readahead operation, we will have added
@@ -232,6 +232,7 @@ void page_cache_ra_unbounded(struct readahead_control *ractl,
lookahead_size);
filemap_invalidate_lock_shared(mapping);
index = mapping_align_index(mapping, index);
+ min_nrpages = mapping_min_folio_nrpages(mapping);
/*
* As iterator `i` is aligned to min_nrpages, round_up the
@@ -467,7 +468,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
struct address_space *mapping = ractl->mapping;
pgoff_t start = readahead_index(ractl);
pgoff_t index = start;
- unsigned int min_order = mapping_min_folio_order(mapping);
+ unsigned int min_order;
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
unsigned int nofs;
@@ -485,13 +486,16 @@ void page_cache_ra_order(struct readahead_control *ractl,
new_order = min(mapping_max_folio_order(mapping), new_order);
new_order = min_t(unsigned int, new_order, ilog2(ra->size));
- new_order = max(new_order, min_order);
ra->order = new_order;
/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
+
+ min_order = mapping_min_folio_order(mapping);
+ new_order = max(new_order, min_order);
+
/*
* If the new_order is greater than min_order and index is
* already aligned to new_order, then this will be noop as index
--
2.43.0
On Mon, Dec 15, 2025 at 10:19:00PM +0800, Jinchao Wang wrote:
> page_cache_ra_order() and page_cache_ra_unbounded() read mapping minimum folio
> constraints before taking the invalidate lock, allowing concurrent changes to
> violate page cache invariants.
>
> Move the lookups under filemap_invalidate_lock_shared() to ensure readahead
> allocations respect the mapping constraints.

Why are the mapping folio size constraints being changed? They're
supposed to be set at inode instantiation and then never changed.