Use the bio_new() helper to allocate and initialize the read bio in
iomap_readpage_actor() instead of open-coding bio_alloc() followed by
manually setting the request op and flags, the starting sector, and the
block device.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
fs/iomap/buffered-io.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
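
Note (not part of the commit message): bio_new() is introduced earlier in
this series. For anyone reading this patch on its own, the two call sites
below assume a helper roughly along the following lines. This is only an
illustrative sketch: the argument order is inferred from the calls in this
hunk, and the clamp to BIO_MAX_PAGES mirrors the bio_alloc() call being
removed; it is not the authoritative definition from the series.

/*
 * Illustrative sketch only: allocate a bio and initialise the fields that
 * iomap_readpage_actor() previously set by hand. Returns NULL if the
 * allocation fails, matching bio_alloc() behaviour.
 */
static inline struct bio *bio_new(struct block_device *bdev, sector_t sector,
				  unsigned int op, unsigned int opf,
				  gfp_t gfp_mask, unsigned int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(gfp_mask, min_t(unsigned int, nr_vecs, BIO_MAX_PAGES));
	if (!bio)
		return NULL;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = op | opf;
	return bio;
}

With a helper of that shape, the second bio_new(..., orig_gfp, 1) call below
preserves the existing single-page fallback when the first allocation fails.
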
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 16a1e82e3aeb..08d119b62cf5 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -241,6 +241,9 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
struct page *page = ctx->cur_page;
struct iomap_page *iop = iomap_page_create(inode, page);
bool same_page = false, is_contig = false;
+ struct block_device *bdev = iomap->bdev;
+ unsigned opf = ctx->rac ? REQ_RAHEAD : 0;
+ unsigned op = REQ_OP_READ;
loff_t orig_pos = pos;
unsigned poff, plen;
sector_t sector;
@@ -285,19 +288,14 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (ctx->rac) /* same as readahead_gfp_mask */
gfp |= __GFP_NORETRY | __GFP_NOWARN;
- ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+ ctx->bio = bio_new(bdev, sector, op, opf, gfp, nr_vecs);
/*
* If the bio_alloc fails, try it again for a single page to
* avoid having to deal with partial page reads. This emulates
* what do_mpage_readpage does.
*/
if (!ctx->bio)
- ctx->bio = bio_alloc(orig_gfp, 1);
- ctx->bio->bi_opf = REQ_OP_READ;
- if (ctx->rac)
- ctx->bio->bi_opf |= REQ_RAHEAD;
- ctx->bio->bi_iter.bi_sector = sector;
- bio_set_dev(ctx->bio, iomap->bdev);
+ ctx->bio = bio_new(bdev, sector, op, opf, orig_gfp, 1);
ctx->bio->bi_end_io = iomap_read_end_io;
}
--
2.22.1