Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return.

For the case of an error in the write path, we need to undo the increments
in the rdev pending counts and NULL-ify the r1_bio->bios[] pointers.

For a read path failure, we need to undo the rdev pending count increment
from the earlier read_balance() call.
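
As background, bio_split() reports failure through the kernel's
error-pointer helpers (ERR_PTR()/IS_ERR()/PTR_ERR() from <linux/err.h>),
which is what the new IS_ERR()/PTR_ERR() checks below rely on. The
following is only a minimal userspace sketch of that pattern: the helpers
are simplified local re-implementations, and demo_split() is a made-up
stand-in for bio_split(), not a real API.

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MAX_ERRNO 4095

  /* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
  static inline void *ERR_PTR(long error)
  {
          return (void *)error;          /* encode -errno in the pointer */
  }

  static inline long PTR_ERR(const void *ptr)
  {
          return (long)ptr;              /* decode it again */
  }

  static inline int IS_ERR(const void *ptr)
  {
          return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
  }

  /* Made-up stand-in for bio_split(): fails for a zero-sector split. */
  static void *demo_split(unsigned int sectors)
  {
          static int dummy;

          if (sectors == 0)
                  return ERR_PTR(-EINVAL);
          return &dummy;
  }

  int main(void)
  {
          void *split = demo_split(0);

          if (IS_ERR(split)) {
                  int error = PTR_ERR(split);

                  /* The raid1 code jumps to err_handle here. */
                  printf("split failed: errno %d\n", error);
                  return 1;
          }
          return 0;
  }

In the patch itself, the decoded errno is then converted with
errno_to_blk_status() before completing the original bio.
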
Signed-off-by: John Garry <john.g.garry@oracle.com>
---
drivers/md/raid1.c | 33 +++++++++++++++++++++++++++++++--
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6c9d24203f39..7e023e9303c8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
         const enum req_op op = bio_op(bio);
         const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
         int max_sectors;
-        int rdisk;
+        int rdisk, error;
         bool r1bio_existed = !!r1_bio;
 
         /*
@@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
         if (max_sectors < bio_sectors(bio)) {
                 struct bio *split = bio_split(bio, max_sectors,
                                               gfp, &conf->bio_split);
+
+                if (IS_ERR(split)) {
+                        error = PTR_ERR(split);
+                        goto err_handle;
+                }
                 bio_chain(split, bio);
                 submit_bio_noacct(bio);
                 bio = split;
@@ -1410,6 +1415,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
         read_bio->bi_private = r1_bio;
         mddev_trace_remap(mddev, read_bio, r1_bio->sector);
         submit_bio_noacct(read_bio);
+        return;
+
+err_handle:
+        atomic_dec(&mirror->rdev->nr_pending);
+        bio->bi_status = errno_to_blk_status(error);
+        set_bit(R1BIO_Uptodate, &r1_bio->state);
+        raid_end_bio_io(r1_bio);
 }
 
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1417,7 +1429,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 {
         struct r1conf *conf = mddev->private;
         struct r1bio *r1_bio;
-        int i, disks;
+        int i, disks, k, error;
         unsigned long flags;
         struct md_rdev *blocked_rdev;
         int first_clone;
@@ -1576,6 +1588,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
         if (max_sectors < bio_sectors(bio)) {
                 struct bio *split = bio_split(bio, max_sectors,
                                               GFP_NOIO, &conf->bio_split);
+
+                if (IS_ERR(split)) {
+                        error = PTR_ERR(split);
+                        goto err_handle;
+                }
                 bio_chain(split, bio);
                 submit_bio_noacct(bio);
                 bio = split;
@@ -1660,6 +1677,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
         /* In case raid1d snuck in to freeze_array */
         wake_up_barrier(conf);
+        return;
+err_handle:
+        for (k = 0; k < i; k++) {
+                if (r1_bio->bios[k]) {
+                        rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+                        r1_bio->bios[k] = NULL;
+                }
+        }
+
+        bio->bi_status = errno_to_blk_status(error);
+        set_bit(R1BIO_Uptodate, &r1_bio->state);
+        raid_end_bio_io(r1_bio);
 }
 
 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
--
2.31.1
On 10/31/24 10:59, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return.
>
> For the case of an error in the write path, we need to undo the increments
> in the rdev pending counts and NULL-ify the r1_bio->bios[] pointers.
>
> For a read path failure, we need to undo the rdev pending count increment
> from the earlier read_balance() call.
>
> Signed-off-by: John Garry <john.g.garry@oracle.com>
> ---
>  drivers/md/raid1.c | 33 +++++++++++++++++++++++++++++++--
>  1 file changed, 31 insertions(+), 2 deletions(-)
>
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes
--
Dr. Hannes Reinecke                  Kernel Storage Architect
hare@suse.de                                +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
On 2024/10/31 17:59, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return.
>
> For the case of an error in the write path, we need to undo the increments
> in the rdev pending counts and NULL-ify the r1_bio->bios[] pointers.
>
> For a read path failure, we need to undo the rdev pending count increment
> from the earlier read_balance() call.
>
> Signed-off-by: John Garry <john.g.garry@oracle.com>
> ---
> drivers/md/raid1.c | 33 +++++++++++++++++++++++++++++++--
> 1 file changed, 31 insertions(+), 2 deletions(-)
>
LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 6c9d24203f39..7e023e9303c8 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
> const enum req_op op = bio_op(bio);
> const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
> int max_sectors;
> - int rdisk;
> + int rdisk, error;
> bool r1bio_existed = !!r1_bio;
>
> /*
> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
> if (max_sectors < bio_sectors(bio)) {
> struct bio *split = bio_split(bio, max_sectors,
> gfp, &conf->bio_split);
> +
> + if (IS_ERR(split)) {
> + error = PTR_ERR(split);
> + goto err_handle;
> + }
> bio_chain(split, bio);
> submit_bio_noacct(bio);
> bio = split;
> @@ -1410,6 +1415,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
> read_bio->bi_private = r1_bio;
> mddev_trace_remap(mddev, read_bio, r1_bio->sector);
> submit_bio_noacct(read_bio);
> + return;
> +
> +err_handle:
> + atomic_dec(&mirror->rdev->nr_pending);
> + bio->bi_status = errno_to_blk_status(error);
> + set_bit(R1BIO_Uptodate, &r1_bio->state);
> + raid_end_bio_io(r1_bio);
> }
>
> static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> @@ -1417,7 +1429,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> {
> struct r1conf *conf = mddev->private;
> struct r1bio *r1_bio;
> - int i, disks;
> + int i, disks, k, error;
> unsigned long flags;
> struct md_rdev *blocked_rdev;
> int first_clone;
> @@ -1576,6 +1588,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> if (max_sectors < bio_sectors(bio)) {
> struct bio *split = bio_split(bio, max_sectors,
> GFP_NOIO, &conf->bio_split);
> +
> + if (IS_ERR(split)) {
> + error = PTR_ERR(split);
> + goto err_handle;
> + }
> bio_chain(split, bio);
> submit_bio_noacct(bio);
> bio = split;
> @@ -1660,6 +1677,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>
> /* In case raid1d snuck in to freeze_array */
> wake_up_barrier(conf);
> + return;
> +err_handle:
> + for (k = 0; k < i; k++) {
> + if (r1_bio->bios[k]) {
> + rdev_dec_pending(conf->mirrors[k].rdev, mddev);
> + r1_bio->bios[k] = NULL;
> + }
> + }
> +
> + bio->bi_status = errno_to_blk_status(error);
> + set_bit(R1BIO_Uptodate, &r1_bio->state);
> + raid_end_bio_io(r1_bio);
> }
>
> static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
>