bio_split() error handling could be improved as follows:

- Instead of returning NULL for an error - which is vague - return an
  ERR_PTR(), which may hint at what went wrong.
- Remove the BUG_ON() calls - which are generally not preferred - and
  instead WARN and pass an error code back to the caller.

Many callers of bio_split() don't check the return code, so for an error
we would still get a crash, now from an invalid pointer dereference.
However, it could be argued that the bio_split() calls should not fail.

So far I have just fixed up the md RAID code to handle these errors, as
that is my interest for now. Sending as an RFC as I am unsure whether this
is the right direction.

The motivator for this series was initial md RAID atomic write support in
https://lore.kernel.org/linux-block/21f19b4b-4b83-4ca2-a93b-0a433741fd26@oracle.com/
There I wanted to ensure that we don't split an atomic write bio, and it
made more sense to handle this in bio_split() (instead of in the
bio_split() caller).

John Garry (6):
  block: Rework bio_split() return value
  block: Error an attempt to split an atomic write in bio_split()
  block: Handle bio_split() errors in bio_submit_split()
  md/raid0: Handle bio_split() errors
  md/raid1: Handle bio_split() errors
  md/raid10: Handle bio_split() errors

 block/bio.c                 | 14 ++++++++++----
 block/blk-crypto-fallback.c |  2 +-
 block/blk-merge.c           |  5 +++++
 drivers/md/raid0.c          | 10 ++++++++++
 drivers/md/raid1.c          |  8 ++++++++
 drivers/md/raid10.c         | 18 ++++++++++++++++++
 6 files changed, 52 insertions(+), 5 deletions(-)

--
2.31.1
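For reference, the caller-side shape that the individual patches below
converge on is the usual kernel ERR_PTR() convention. A minimal sketch,
assuming a hypothetical submit path - my_submit_split() and my_bio_set
are illustrative names, not anything this series adds:

/*
 * Hedged sketch of the caller-side pattern: bio_split() hands back an
 * ERR_PTR() on failure, which the caller turns into a block-layer status
 * and uses to end the original bio instead of dereferencing it.
 * my_submit_split() and my_bio_set (an initialised bio_set owned by the
 * caller) are hypothetical.
 */
static void my_submit_split(struct bio *bio, int sectors)
{
        struct bio *split = bio_split(bio, sectors, GFP_NOIO, &my_bio_set);

        if (IS_ERR(split)) {
                /* propagate the errno rather than crashing on a bad pointer */
                bio->bi_status = errno_to_blk_status(PTR_ERR(split));
                bio_endio(bio);
                return;
        }

        bio_chain(split, bio);          /* remainder completes after the split part */
        submit_bio_noacct(bio);         /* resubmit the remainder */
        /* ... carry on issuing the split part ... */
}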
Instead of returning an inconclusive value of NULL for an error in calling
bio_split(), always return an ERR_PTR(). Also remove the BUG_ON() calls
and WARN() instead. Indeed, since almost all callers don't check the
return code from bio_split(), we'll crash anyway (for those failures).

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/bio.c                 | 10 ++++++----
 block/blk-crypto-fallback.c |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -XXX,XX +XXX,XX @@ struct bio *bio_split(struct bio *bio, int sectors,
 {
        struct bio *split;

-       BUG_ON(sectors <= 0);
-       BUG_ON(sectors >= bio_sectors(bio));
+       if (WARN_ON(sectors <= 0))
+               return ERR_PTR(-EINVAL);
+       if (WARN_ON(sectors >= bio_sectors(bio)))
+               return ERR_PTR(-EINVAL);

        /* Zone append commands cannot be split */
        if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
-               return NULL;
+               return ERR_PTR(-EINVAL);

        split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
        if (!split)
-               return NULL;
+               return ERR_PTR(-ENOMEM);

        split->bi_iter.bi_size = sectors << 9;

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -XXX,XX +XXX,XX @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
                split_bio = bio_split(bio, num_sectors, GFP_NOIO,
                                      &crypto_bio_split);
-               if (!split_bio) {
+               if (IS_ERR(split_bio)) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }

--
2.31.1
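For readers less used to the convention: ERR_PTR(), IS_ERR() and
PTR_ERR() encode a small negative errno inside a pointer value that can
never be a valid address, so a single return value can carry either a bio
or a reason for failure. A standalone user-space mimic of the idea - this
is only an illustration, not the kernel's linux/err.h:

#include <errno.h>
#include <stdio.h>

/*
 * User-space mimic of the kernel ERR_PTR convention, for illustration
 * only: a small negative errno is stored in the top page of the address
 * space, which is assumed never to be a valid pointer.
 */
#define MAX_ERRNO 4095

static inline void *err_ptr(long error)        /* ~ ERR_PTR() */
{
        return (void *)error;
}

static inline long ptr_err(const void *ptr)    /* ~ PTR_ERR() */
{
        return (long)ptr;
}

static inline int is_err(const void *ptr)      /* ~ IS_ERR() */
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *split = err_ptr(-ENOMEM); /* what a failed split would hand back */

        if (is_err(split))
                printf("split failed: errno %ld\n", -ptr_err(split));

        return 0;
}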
Splitting an atomic write bio is disallowed, so error this case in
bio_split().

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/bio.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/bio.c b/block/bio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -XXX,XX +XXX,XX @@ struct bio *bio_split(struct bio *bio, int sectors,
        if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
                return ERR_PTR(-EINVAL);

+       /* atomic writes cannot be split */
+       if (bio->bi_opf & REQ_ATOMIC)
+               return ERR_PTR(-EINVAL);
+
        split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
        if (!split)
                return ERR_PTR(-ENOMEM);

--
2.31.1
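The practical consequence for callers is that an atomic write must fit
within whatever limit would otherwise force a split. A minimal sketch of
a hypothetical stacking-driver submit path honouring that -
my_limit_sectors() and my_bio_set are made-up names, and the fallback
policy shown (failing the bio) is only one possible choice:

/*
 * Hedged sketch, not from this series: never ask bio_split() to split an
 * atomic write; if it does not fit under the split limit, fail the bio
 * instead. my_limit_sectors() and my_bio_set are hypothetical.
 */
static void my_make_request(struct bio *bio)
{
        unsigned int limit = my_limit_sectors(bio);

        if (limit >= bio_sectors(bio)) {
                submit_bio_noacct(bio);         /* fits - no split needed */
                return;
        }

        if (bio->bi_opf & REQ_ATOMIC) {
                /* splitting would break the all-or-nothing guarantee */
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;
        }

        /* a non-atomic bio may still be split, chained and resubmitted as usual */
}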
bio_split() may error, so check this.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
Should we move the WARN_ON_ONCE(bio_zone_write_plugging(bio)) call (not
shown) in bio_submit_split() to bio_split()?

 block/blk-merge.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -XXX,XX +XXX,XX @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
                split = bio_split(bio, split_sectors, GFP_NOIO,
                                  &bio->bi_bdev->bd_disk->bio_split);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return NULL;
+               }
                split->bi_opf |= REQ_NOMERGE;
                blkcg_bio_issue_init(split);
                bio_chain(split, bio);

--
2.31.1
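The error path added here - map the errno carried by the ERR_PTR() to a
blk_status_t and complete the original bio - is the same shape the md
patches later in the series use. A small sketch of it factored as a
helper; bio_split_error() is a hypothetical name, and the mapping note is
from memory (as far as I recall, errno_to_blk_status() turns -ENOMEM into
BLK_STS_RESOURCE and falls back to BLK_STS_IOERR for errnos it does not
recognise, such as -EINVAL):

/*
 * Hedged sketch of the error path above, factored as a helper.
 * bio_split_error() is hypothetical and not part of this series.
 */
static struct bio *bio_split_error(struct bio *bio, struct bio *split)
{
        /* the ERR_PTR() carries a negative errno; map it to a blk_status_t */
        bio->bi_status = errno_to_blk_status(PTR_ERR(split));
        bio_endio(bio);
        return NULL;
}

With such a helper the hunk above would reduce to:
if (IS_ERR(split)) return bio_split_error(bio, split);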
Add proper bio_split() error handling. For any error, set bi_status, end
the bio, and return.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid0.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -XXX,XX +XXX,XX @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                struct bio *split = bio_split(bio,
                        zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
                        &mddev->bio_set);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -XXX,XX +XXX,XX @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
        if (sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                              &mddev->bio_set);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return true;
+               }
                bio_chain(split, bio);
                raid0_map_submit_bio(mddev, bio);
                bio = split;

--
2.31.1
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid1.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -XXX,XX +XXX,XX @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       raid_end_bio_io(r1_bio);
+                       return;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -XXX,XX +XXX,XX @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       raid_end_bio_io(r1_bio);
+                       return;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;

--
2.31.1
Add proper bio_split() error handling. For any read or write error, call
raid_end_bio_io() and return. For discard errors, just end the bio with
an error.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid10.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -XXX,XX +XXX,XX @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       raid_end_bio_io(r10_bio);
+                       return;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                submit_bio_noacct(bio);
@@ -XXX,XX +XXX,XX @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
        if (r10_bio->sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, r10_bio->sectors,
                                              GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       raid_end_bio_io(r10_bio);
+                       return;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                submit_bio_noacct(bio);
@@ -XXX,XX +XXX,XX @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
        if (remainder) {
                split_size = stripe_size - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return 0;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the fist split part */
@@ -XXX,XX +XXX,XX @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
        if (remainder) {
                split_size = bio_sectors(bio) - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return 0;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the second split part */

--
2.31.1
bio_split() error handling could be improved as follows:

- Instead of returning NULL for an error - which is vague - return an
  ERR_PTR(), which may hint at what went wrong.
- Remove BUG_ON() calls - which are generally not preferred - and
  instead WARN and pass an error code back to the caller.

Many callers of bio_split() don't check the return code, so for an error
we would still get a crash, now from an invalid pointer dereference.
However, it could be argued that the bio_split() calls should not fail.

So far I have just fixed up the md RAID code to handle these errors, as
that is my interest for now.

The motivator for this series was initial md RAID atomic write support in
https://lore.kernel.org/linux-block/20241030094912.3960234-1-john.g.garry@oracle.com/T/#m5859ee900de8e6554d5bb027c0558f0147c32df8
There I wanted to ensure that we don't split an atomic write bio, and it
made more sense to handle this in bio_split() (instead of in the
bio_split() caller).

Based on 133008e84b99 (block/for-6.13/block) blk-integrity: remove seed
for user mapped buffers

Changes since v2:
- Drop "block: Use BLK_STS_OK in bio_init()" change (Christoph)
- Use proper rdev indexing in raid10_write_request() (Kuai)
- Decrement rdev nr_pending in raid1 read error path (Kuai)
- Add RB tags from Christoph, Johannes, and Kuai (thanks!)

Changes since RFC:
- Proper handling to end the raid bio in all cases, and also pass back a
  proper error code (Kuai)
- Add WARN_ON_ONCE() in bio_split() (Johannes, Christoph)
- Add small patch to use BLK_STS_OK in bio_init()
- Change bio_submit_split() error path (Christoph)

John Garry (6):
  block: Rework bio_split() return value
  block: Error an attempt to split an atomic write in bio_split()
  block: Handle bio_split() errors in bio_submit_split()
  md/raid0: Handle bio_split() errors
  md/raid1: Handle bio_split() errors
  md/raid10: Handle bio_split() errors

 block/bio.c                 | 14 +++++++----
 block/blk-crypto-fallback.c |  2 +-
 block/blk-merge.c           | 15 ++++++++----
 drivers/md/raid0.c          | 12 ++++++++++
 drivers/md/raid1.c          | 33 ++++++++++++++++++++++++--
 drivers/md/raid10.c         | 47 ++++++++++++++++++++++++++++++++++++-
 6 files changed, 110 insertions(+), 13 deletions(-)

--
2.31.1
Instead of returning an inconclusive value of NULL for an error in calling
bio_split(), always return an ERR_PTR(). Also remove the BUG_ON() calls
and use WARN_ON_ONCE() instead. Indeed, since almost all callers don't
check the return code from bio_split(), we'll crash anyway (for those
failures).

Fix up the only user which checks the bio_split() return code today
(directly or indirectly), blk_crypto_fallback_split_bio_if_needed().

The md/bcache code does check the return code in cached_dev_cache_miss()
-> bio_next_split() -> bio_split(), but only to see if there was a split,
so there would be no change in behaviour here (when returning an
ERR_PTR()).

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/bio.c                 | 10 ++++++----
 block/blk-crypto-fallback.c |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -XXX,XX +XXX,XX @@ struct bio *bio_split(struct bio *bio, int sectors,
 {
        struct bio *split;

-       BUG_ON(sectors <= 0);
-       BUG_ON(sectors >= bio_sectors(bio));
+       if (WARN_ON_ONCE(sectors <= 0))
+               return ERR_PTR(-EINVAL);
+       if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
+               return ERR_PTR(-EINVAL);

        /* Zone append commands cannot be split */
        if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
-               return NULL;
+               return ERR_PTR(-EINVAL);

        split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
        if (!split)
-               return NULL;
+               return ERR_PTR(-ENOMEM);

        split->bi_iter.bi_size = sectors << 9;

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -XXX,XX +XXX,XX @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
                split_bio = bio_split(bio, num_sectors, GFP_NOIO,
                                      &crypto_bio_split);
-               if (!split_bio) {
+               if (IS_ERR(split_bio)) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        return false;
                }

--
2.31.1
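The bcache path mentioned above goes through bio_next_split(), which only
calls bio_split() when a split is actually needed, so an ERR_PTR() can
only ever reach it on a genuine failure. A sketch of that helper as
recalled from include/linux/bio.h - worth verifying against the current
tree:

/*
 * bio_next_split() as recalled from include/linux/bio.h, shown only to
 * illustrate why the ERR_PTR() conversion does not change behaviour for
 * this caller: when no split is needed, the original bio is returned and
 * bio_split() is never called.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}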
Splitting an atomic write bio is disallowed, so error this case in
bio_split().

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/bio.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/block/bio.c b/block/bio.c
index XXXXXXX..XXXXXXX 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -XXX,XX +XXX,XX @@ struct bio *bio_split(struct bio *bio, int sectors,
        if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
                return ERR_PTR(-EINVAL);

+       /* atomic writes cannot be split */
+       if (bio->bi_opf & REQ_ATOMIC)
+               return ERR_PTR(-EINVAL);
+
        split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
        if (!split)
                return ERR_PTR(-ENOMEM);

--
2.31.1
bio_split() may error, so check this.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 block/blk-merge.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/block/blk-merge.c b/block/blk-merge.c
index XXXXXXX..XXXXXXX 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -XXX,XX +XXX,XX @@ static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)

 static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 {
-       if (unlikely(split_sectors < 0)) {
-               bio->bi_status = errno_to_blk_status(split_sectors);
-               bio_endio(bio);
-               return NULL;
-       }
+       if (unlikely(split_sectors < 0))
+               goto error;

        if (unlikely((bio_op(bio) == REQ_OP_DISCARD))
@@ -XXX,XX +XXX,XX @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
                split = bio_split(bio, split_sectors, GFP_NOIO,
                                  &bio->bi_bdev->bd_disk->bio_split);
+               if (IS_ERR(split)) {
+                       split_sectors = PTR_ERR(split);
+                       goto error;
+               }
                split->bi_opf |= REQ_NOMERGE;
                blkcg_bio_issue_init(split);
                bio_chain(split, bio);
@@ -XXX,XX +XXX,XX @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
        }

        return bio;
+error:
+       bio->bi_status = errno_to_blk_status(split_sectors);
+       bio_endio(bio);
+       return NULL;
 }

 struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,

--
2.31.1
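The refactor above funnels both failure modes through a single error:
label by letting split_sectors double as the errno carrier. The same
single-exit idiom in a self-contained form, with hypothetical names and
values chosen only for illustration:

#include <errno.h>
#include <stdio.h>

/*
 * Self-contained illustration of the single-exit error idiom used in
 * bio_submit_split() above: one variable holds either a valid sector
 * count or a negative errno, and every failure funnels through one
 * label. The function and its names are hypothetical.
 */
static int submit_chunk(int chunk_sectors, int total_sectors)
{
        int sectors = chunk_sectors;

        if (sectors < 0)                /* caller already passed in an errno */
                goto error;

        if (sectors > total_sectors) {  /* second failure mode */
                sectors = -EINVAL;
                goto error;
        }

        printf("submitting %d of %d sectors\n", sectors, total_sectors);
        return 0;

error:
        printf("failing request: errno %d\n", -sectors);
        return sectors;
}

int main(void)
{
        submit_chunk(8, 16);            /* ok */
        submit_chunk(32, 16);           /* too large -> -EINVAL path */
        submit_chunk(-ENOMEM, 16);      /* pre-existing error -> same path */
        return 0;
}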
Add proper bio_split() error handling. For any error, set bi_status, end
the bio, and return.

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid0.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -XXX,XX +XXX,XX @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                struct bio *split = bio_split(bio,
                        zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
                        &mddev->bio_set);
+
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -XXX,XX +XXX,XX @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
        if (sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, sectors, GFP_NOIO,
                                              &mddev->bio_set);
+
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return true;
+               }
                bio_chain(split, bio);
                raid0_map_submit_bio(mddev, bio);
                bio = split;

--
2.31.1
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return. For the case of an error in the write path,
we need to undo the increment in the rdev pending count and NULLify the
r1_bio->bios[] pointers. For read path failure, we need to undo the rdev
pending count increment from the earlier read_balance() call.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid1.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -XXX,XX +XXX,XX @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        const enum req_op op = bio_op(bio);
        const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
        int max_sectors;
-       int rdisk;
+       int rdisk, error;
        bool r1bio_existed = !!r1_bio;

        /*
@@ -XXX,XX +XXX,XX @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -XXX,XX +XXX,XX @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
        read_bio->bi_private = r1_bio;
        mddev_trace_remap(mddev, read_bio, r1_bio->sector);
        submit_bio_noacct(read_bio);
+       return;
+
+err_handle:
+       atomic_dec(&mirror->rdev->nr_pending);
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }

 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -XXX,XX +XXX,XX @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
-       int i, disks;
+       int i, disks, k, error;
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        int first_clone;
@@ -XXX,XX +XXX,XX @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, &conf->bio_split);
+
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                submit_bio_noacct(bio);
                bio = split;
@@ -XXX,XX +XXX,XX @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        /* In case raid1d snuck in to freeze_array */
        wake_up_barrier(conf);
+       return;
+err_handle:
+       for (k = 0; k < i; k++) {
+               if (r1_bio->bios[k]) {
+                       rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+                       r1_bio->bios[k] = NULL;
+               }
+       }
+
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R1BIO_Uptodate, &r1_bio->state);
+       raid_end_bio_io(r1_bio);
 }

 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)

--
2.31.1
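The write-path unwind relies on r1_bio->bios[k] being non-NULL exactly
for the devices whose rdev reference had already been taken, so the loop
releases only what was acquired before the failure. The same
acquire-then-unwind shape in a self-contained form, with hypothetical
names and a simulated failure:

#include <stdio.h>
#include <stdlib.h>

/*
 * Generic illustration of the acquire-then-unwind shape used in the
 * raid1 write path above: on failure, walk back over only the slots that
 * were actually populated and release them. All names are hypothetical.
 */
#define NR_SLOTS 4

static void *grab_ref(int k)
{
        return (k == 2) ? NULL : malloc(16);    /* simulate a failure at slot 2 */
}

int main(void)
{
        void *refs[NR_SLOTS] = { NULL };
        int i, k;

        for (i = 0; i < NR_SLOTS; i++) {
                refs[i] = grab_ref(i);
                if (!refs[i])
                        goto err_handle;
        }
        printf("all %d references taken\n", NR_SLOTS);
        return 0;

err_handle:
        /* release only what was acquired, mirroring the r1_bio->bios[] loop */
        for (k = 0; k < i; k++) {
                free(refs[k]);
                refs[k] = NULL;
        }
        printf("failed at slot %d, released %d references\n", i, i);
        return 1;
}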
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return, except for discard, where we end the bio
directly.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid10.c | 47 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index XXXXXXX..XXXXXXX 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -XXX,XX +XXX,XX @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        int slot = r10_bio->read_slot;
        struct md_rdev *err_rdev = NULL;
        gfp_t gfp = GFP_NOIO;
+       int error;

        if (slot >= 0 && r10_bio->devs[slot].rdev) {
                /*
@@ -XXX,XX +XXX,XX @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        if (max_sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, max_sectors,
                                              gfp, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                submit_bio_noacct(bio);
@@ -XXX,XX +XXX,XX @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
        mddev_trace_remap(mddev, read_bio, r10_bio->sector);
        submit_bio_noacct(read_bio);
        return;
+err_handle:
+       atomic_dec(&rdev->nr_pending);
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R10BIO_Uptodate, &r10_bio->state);
+       raid_end_bio_io(r10_bio);
 }

 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
@@ -XXX,XX +XXX,XX @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                                 struct r10bio *r10_bio)
 {
        struct r10conf *conf = mddev->private;
-       int i;
+       int i, k;
        sector_t sectors;
        int max_sectors;
+       int error;

        if ((mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -XXX,XX +XXX,XX @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
        if (r10_bio->sectors < bio_sectors(bio)) {
                struct bio *split = bio_split(bio, r10_bio->sectors,
                                              GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       error = PTR_ERR(split);
+                       goto err_handle;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                submit_bio_noacct(bio);
@@ -XXX,XX +XXX,XX @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
                raid10_write_one_disk(mddev, r10_bio, bio, true, i);
        }
        one_write_done(r10_bio);
+       return;
+err_handle:
+       for (k = 0; k < i; k++) {
+               int d = r10_bio->devs[k].devnum;
+               struct md_rdev *rdev = conf->mirrors[d].rdev;
+               struct md_rdev *rrdev = conf->mirrors[d].replacement;
+
+               if (r10_bio->devs[k].bio) {
+                       rdev_dec_pending(rdev, mddev);
+                       r10_bio->devs[k].bio = NULL;
+               }
+               if (r10_bio->devs[k].repl_bio) {
+                       rdev_dec_pending(rrdev, mddev);
+                       r10_bio->devs[k].repl_bio = NULL;
+               }
+       }
+
+       bio->bi_status = errno_to_blk_status(error);
+       set_bit(R10BIO_Uptodate, &r10_bio->state);
+       raid_end_bio_io(r10_bio);
 }

 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
@@ -XXX,XX +XXX,XX @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
        if (remainder) {
                split_size = stripe_size - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return 0;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the fist split part */
@@ -XXX,XX +XXX,XX @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
        if (remainder) {
                split_size = bio_sectors(bio) - remainder;
                split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
+               if (IS_ERR(split)) {
+                       bio->bi_status = errno_to_blk_status(PTR_ERR(split));
+                       bio_endio(bio);
+                       return 0;
+               }
                bio_chain(split, bio);
                allow_barrier(conf);
                /* Resend the second split part */

--
2.31.1