[PATCH v2 6/7] md/raid1: Handle bio_split() errors

Posted by John Garry 3 weeks, 6 days ago
Add proper bio_split() error handling. For any error, call
raid_end_bio_io() and return.

For the case of an error in the write path, we need to undo the increment in
the rdev pending count and NULLify the r1_bio->bios[] pointers.

Signed-off-by: John Garry <john.g.garry@oracle.com>
---
 drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6c9d24203f39..a10018282629 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	const enum req_op op = bio_op(bio);
 	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
 	int max_sectors;
-	int rdisk;
+	int rdisk, error;
 	bool r1bio_existed = !!r1_bio;
 
 	/*
@@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
 					      gfp, &conf->bio_split);
+
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		submit_bio_noacct(bio);
 		bio = split;
@@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	read_bio->bi_private = r1_bio;
 	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
 	submit_bio_noacct(read_bio);
+	return;
+
+err_handle:
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R1BIO_Uptodate, &r1_bio->state);
+	raid_end_bio_io(r1_bio);
 }
 
 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
@@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
-	int i, disks;
+	int i, disks, k, error;
 	unsigned long flags;
 	struct md_rdev *blocked_rdev;
 	int first_clone;
@@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	if (max_sectors < bio_sectors(bio)) {
 		struct bio *split = bio_split(bio, max_sectors,
 					      GFP_NOIO, &conf->bio_split);
+
+		if (IS_ERR(split)) {
+			error = PTR_ERR(split);
+			goto err_handle;
+		}
 		bio_chain(split, bio);
 		submit_bio_noacct(bio);
 		bio = split;
@@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
 	/* In case raid1d snuck in to freeze_array */
 	wake_up_barrier(conf);
+	return;
+err_handle:
+	for (k = 0; k < i; k++) {
+		if (r1_bio->bios[k]) {
+			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
+			r1_bio->bios[k] = NULL;
+		}
+	}
+
+	bio->bi_status = errno_to_blk_status(error);
+	set_bit(R1BIO_Uptodate, &r1_bio->state);
+	raid_end_bio_io(r1_bio);
 }
 
 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
-- 
2.31.1
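
A note on the error label above: setting R1BIO_Uptodate on a failure path looks
odd at first glance, but it is what stops the raid1 completion path from
clobbering the status just assigned. Roughly, the existing completion helper in
drivers/md/raid1.c (reached via raid_end_bio_io()) behaves like the simplified
sketch below; details may differ slightly between kernel versions:

static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/*
	 * Without R1BIO_Uptodate the master bio is forced to BLK_STS_IOERR,
	 * which would overwrite the errno_to_blk_status() value assigned at
	 * err_handle. Setting the bit preserves the specific split error.
	 */
	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		bio->bi_status = BLK_STS_IOERR;

	bio_endio(bio);
}
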
Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by Yu Kuai 3 weeks, 5 days ago
Hi,

On 2024/10/28 23:27, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return.
> 
> For the case of an error in the write path, we need to undo the increment in
> the rdev pending count and NULLify the r1_bio->bios[] pointers.
> 
> Signed-off-by: John Garry <john.g.garry@oracle.com>
> ---
>   drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
>   1 file changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 6c9d24203f39..a10018282629 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	const enum req_op op = bio_op(bio);
>   	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
>   	int max_sectors;
> -	int rdisk;
> +	int rdisk, error;
>   	bool r1bio_existed = !!r1_bio;
>   
>   	/*
> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      gfp, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	read_bio->bi_private = r1_bio;
>   	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
>   	submit_bio_noacct(read_bio);
> +	return;
> +
> +err_handle:
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);

rdev_dec_pending() is missed here. :)

Thanks,
Kuai

>   }
>   
>   static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> @@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   {
>   	struct r1conf *conf = mddev->private;
>   	struct r1bio *r1_bio;
> -	int i, disks;
> +	int i, disks, k, error;
>   	unsigned long flags;
>   	struct md_rdev *blocked_rdev;
>   	int first_clone;
> @@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      GFP_NOIO, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   
>   	/* In case raid1d snuck in to freeze_array */
>   	wake_up_barrier(conf);
> +	return;
> +err_handle:
> +	for (k = 0; k < i; k++) {
> +		if (r1_bio->bios[k]) {
> +			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
> +			r1_bio->bios[k] = NULL;
> +		}
> +	}
> +
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);
>   }
>   
>   static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
> 

Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by John Garry 3 weeks, 5 days ago
On 29/10/2024 12:12, Yu Kuai wrote:
>> +err_handle:
>> +    bio->bi_status = errno_to_blk_status(error);
>> +    set_bit(R1BIO_Uptodate, &r1_bio->state);
>> +    raid_end_bio_io(r1_bio);
> 
> rdev_dec_pending() is missed here. 🙂

ok, I will fix. I am not sure how I missed this...

And I will drop your RB tag.

Cheers!
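
For reference, a minimal sketch of how the read-path error label could look
with the missing rdev_dec_pending() folded in (hypothetical shape for the next
version, assuming mirror still points at conf->mirrors + rdisk as set earlier
in raid1_read_request()):

err_handle:
	/*
	 * The split failed after read_balance() picked a disk and bumped its
	 * nr_pending count, so drop that reference before completing the
	 * original bio with the split error.
	 */
	rdev_dec_pending(mirror->rdev, mddev);
	bio->bi_status = errno_to_blk_status(error);
	set_bit(R1BIO_Uptodate, &r1_bio->state);
	raid_end_bio_io(r1_bio);
}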
Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by Yu Kuai 3 weeks, 5 days ago
On 2024/10/28 23:27, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return.
> 
> For the case of an error in the write path, we need to undo the increment in
> the rdev pending count and NULLify the r1_bio->bios[] pointers.
> 
> Signed-off-by: John Garry <john.g.garry@oracle.com>

LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
> ---
>   drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
>   1 file changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 6c9d24203f39..a10018282629 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	const enum req_op op = bio_op(bio);
>   	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
>   	int max_sectors;
> -	int rdisk;
> +	int rdisk, error;
>   	bool r1bio_existed = !!r1_bio;
>   
>   	/*
> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      gfp, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	read_bio->bi_private = r1_bio;
>   	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
>   	submit_bio_noacct(read_bio);
> +	return;
> +
> +err_handle:
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);
>   }
>   
>   static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> @@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   {
>   	struct r1conf *conf = mddev->private;
>   	struct r1bio *r1_bio;
> -	int i, disks;
> +	int i, disks, k, error;
>   	unsigned long flags;
>   	struct md_rdev *blocked_rdev;
>   	int first_clone;
> @@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      GFP_NOIO, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   
>   	/* In case raid1d snuck in to freeze_array */
>   	wake_up_barrier(conf);
> +	return;
> +err_handle:
> +	for (k = 0; k < i; k++) {
> +		if (r1_bio->bios[k]) {
> +			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
> +			r1_bio->bios[k] = NULL;
> +		}
> +	}
> +
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);
>   }
>   
>   static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
> 

Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by Yu Kuai 3 weeks, 6 days ago
Hi,

On 2024/10/28 23:27, John Garry wrote:
> Add proper bio_split() error handling. For any error, call
> raid_end_bio_io() and return.
> 
> For the case of an error in the write path, we need to undo the increment in
> the rdev pending count and NULLify the r1_bio->bios[] pointers.
> 
> Signed-off-by: John Garry <john.g.garry@oracle.com>
> ---
>   drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
>   1 file changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 6c9d24203f39..a10018282629 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	const enum req_op op = bio_op(bio);
>   	const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
>   	int max_sectors;
> -	int rdisk;
> +	int rdisk, error;
>   	bool r1bio_existed = !!r1_bio;
>   
>   	/*
> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      gfp, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
>   	read_bio->bi_private = r1_bio;
>   	mddev_trace_remap(mddev, read_bio, r1_bio->sector);
>   	submit_bio_noacct(read_bio);
> +	return;
> +
> +err_handle:
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);
>   }
>   
>   static void raid1_write_request(struct mddev *mddev, struct bio *bio,
> @@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   {
>   	struct r1conf *conf = mddev->private;
>   	struct r1bio *r1_bio;
> -	int i, disks;
> +	int i, disks, k, error;
>   	unsigned long flags;
>   	struct md_rdev *blocked_rdev;
>   	int first_clone;
> @@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   	if (max_sectors < bio_sectors(bio)) {
>   		struct bio *split = bio_split(bio, max_sectors,
>   					      GFP_NOIO, &conf->bio_split);
> +
> +		if (IS_ERR(split)) {
> +			error = PTR_ERR(split);
> +			goto err_handle;
> +		}
>   		bio_chain(split, bio);
>   		submit_bio_noacct(bio);
>   		bio = split;
> @@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>   
>   	/* In case raid1d snuck in to freeze_array */
>   	wake_up_barrier(conf);
> +	return;
> +err_handle:
> +	for (k = 0; k < i; k++) {
> +		if (r1_bio->bios[k]) {
> +			rdev_dec_pending(conf->mirrors[k].rdev, mddev);
> +			r1_bio->bios[k] = NULL;
> +		}
> +	}
> +
> +	bio->bi_status = errno_to_blk_status(error);
> +	set_bit(R1BIO_Uptodate, &r1_bio->state);
> +	raid_end_bio_io(r1_bio);

Looks good that the error code is passed to the orig bio. However,
I really think badblocks should be handled somehow; it just doesn't make
sense to return an IO error to filesystems or users if one underlying disk
contains BBs while the others are good.

Or is it guaranteed that an IO error from an atomic write won't hurt anyone,
i.e. the user will handle the error and retry with a non-atomic write?

Thanks,
Kuai
>   }
>   
>   static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
> 

Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by John Garry 3 weeks, 6 days ago
On 29/10/2024 03:48, Yu Kuai wrote:
> Hi,
> 
> On 2024/10/28 23:27, John Garry wrote:
>> Add proper bio_split() error handling. For any error, call
>> raid_end_bio_io() and return.
>>
>> For the case of an error in the write path, we need to undo the increment in
>> the rdev pending count and NULLify the r1_bio->bios[] pointers.
>>
>> Signed-off-by: John Garry <john.g.garry@oracle.com>
>> ---
>>   drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
>>   1 file changed, 30 insertions(+), 2 deletions(-)
>>
>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>> index 6c9d24203f39..a10018282629 100644
>> --- a/drivers/md/raid1.c
>> +++ b/drivers/md/raid1.c
>> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev 
>> *mddev, struct bio *bio,
>>       const enum req_op op = bio_op(bio);
>>       const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
>>       int max_sectors;
>> -    int rdisk;
>> +    int rdisk, error;
>>       bool r1bio_existed = !!r1_bio;
>>       /*
>> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev 
>> *mddev, struct bio *bio,
>>       if (max_sectors < bio_sectors(bio)) {
>>           struct bio *split = bio_split(bio, max_sectors,
>>                             gfp, &conf->bio_split);
>> +
>> +        if (IS_ERR(split)) {
>> +            error = PTR_ERR(split);
>> +            goto err_handle;
>> +        }
>>           bio_chain(split, bio);
>>           submit_bio_noacct(bio);
>>           bio = split;
>> @@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev 
>> *mddev, struct bio *bio,
>>       read_bio->bi_private = r1_bio;
>>       mddev_trace_remap(mddev, read_bio, r1_bio->sector);
>>       submit_bio_noacct(read_bio);
>> +    return;
>> +
>> +err_handle:
>> +    bio->bi_status = errno_to_blk_status(error);
>> +    set_bit(R1BIO_Uptodate, &r1_bio->state);
>> +    raid_end_bio_io(r1_bio);
>>   }
>>   static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>> @@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev 
>> *mddev, struct bio *bio,
>>   {
>>       struct r1conf *conf = mddev->private;
>>       struct r1bio *r1_bio;
>> -    int i, disks;
>> +    int i, disks, k, error;
>>       unsigned long flags;
>>       struct md_rdev *blocked_rdev;
>>       int first_clone;
>> @@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev 
>> *mddev, struct bio *bio,
>>       if (max_sectors < bio_sectors(bio)) {
>>           struct bio *split = bio_split(bio, max_sectors,
>>                             GFP_NOIO, &conf->bio_split);
>> +
>> +        if (IS_ERR(split)) {
>> +            error = PTR_ERR(split);
>> +            goto err_handle;
>> +        }
>>           bio_chain(split, bio);
>>           submit_bio_noacct(bio);
>>           bio = split;
>> @@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev 
>> *mddev, struct bio *bio,
>>       /* In case raid1d snuck in to freeze_array */
>>       wake_up_barrier(conf);
>> +    return;
>> +err_handle:
>> +    for (k = 0; k < i; k++) {
>> +        if (r1_bio->bios[k]) {
>> +            rdev_dec_pending(conf->mirrors[k].rdev, mddev);
>> +            r1_bio->bios[k] = NULL;
>> +        }
>> +    }
>> +
>> +    bio->bi_status = errno_to_blk_status(error);
>> +    set_bit(R1BIO_Uptodate, &r1_bio->state);
>> +    raid_end_bio_io(r1_bio);

Hi Kuai,

> 
> Looks good that the error code is passed to the orig bio. However,
> I really think badblocks should be handled somehow; it just doesn't make
> sense to return an IO error to filesystems or users if one underlying disk
> contains BBs while the others are good.

Please be aware that this change is not for handling splits in atomic 
writes. It is for the situation when a split fails for whatever reason - 
likely a software bug.

For when atomic writes are supported for raid1, my plan is that an 
atomic write over a region which covers a BB will error, i.e. goto 
err_handle, like:

--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1514,6 +1514,12 @@ static void raid1_write_request(struct mddev 
*mddev, struct bio *bio,
  				break;
  			}

+			if (is_bad && bio->bi_opf & REQ_ATOMIC) {
+				/* We just cannot atomically write this ... */
+				err = -EIO;
+				goto err_handle;
+			}
+
  			if (is_bad && first_bad <= r1_bio->sector) {


I just think that if we try to write a region atomically which contains 
BBs then we should error. Indeed, as I mentioned previously, I really 
don't expect BBs on devices which support atomic writes. But we should 
still handle it.

OTOH, if we did want to handle atomic writes to regions with BBs, we 
could make a bigger effort and write the disks which don't have BBs 
atomically (so that we don't split for those good disks). But this is 
too complicated and does not achieve much.

> 
>> Or is it guaranteed that an IO error from an atomic write won't hurt anyone,
>> i.e. the user will handle the error and retry with a non-atomic write?

Yes, I think that the user could retry non-atomically for the same 
write. Maybe returning a special error code could be useful for this.
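
As an illustration of that retry model (not part of this series), a userspace
caller might do something like the sketch below, assuming RWF_ATOMIC is
exposed by the uapi/libc headers in use:

#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>

/*
 * Hypothetical helper: try an atomic write first and, if the kernel fails
 * it (e.g. -EIO from the RAID1 path discussed above), retry the same range
 * as a plain, non-atomic write.
 */
static ssize_t write_atomic_or_fallback(int fd, const struct iovec *iov,
					int iovcnt, off_t offset)
{
	ssize_t ret = pwritev2(fd, iov, iovcnt, offset, RWF_ATOMIC);

	if (ret < 0 && errno == EIO)
		ret = pwritev2(fd, iov, iovcnt, offset, 0);

	return ret;
}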

Thanks,
John
Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by Yu Kuai 3 weeks, 5 days ago
Hi,

On 2024/10/29 16:45, John Garry wrote:
> On 29/10/2024 03:48, Yu Kuai wrote:
>> Hi,
>>
>> On 2024/10/28 23:27, John Garry wrote:
>>> Add proper bio_split() error handling. For any error, call
>>> raid_end_bio_io() and return.
>>>
>>> For the case of an error in the write path, we need to undo the increment in
>>> the rdev pending count and NULLify the r1_bio->bios[] pointers.
>>>
>>> Signed-off-by: John Garry <john.g.garry@oracle.com>
>>> ---
>>>   drivers/md/raid1.c | 32 ++++++++++++++++++++++++++++++--
>>>   1 file changed, 30 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>>> index 6c9d24203f39..a10018282629 100644
>>> --- a/drivers/md/raid1.c
>>> +++ b/drivers/md/raid1.c
>>> @@ -1322,7 +1322,7 @@ static void raid1_read_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>       const enum req_op op = bio_op(bio);
>>>       const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
>>>       int max_sectors;
>>> -    int rdisk;
>>> +    int rdisk, error;
>>>       bool r1bio_existed = !!r1_bio;
>>>       /*
>>> @@ -1383,6 +1383,11 @@ static void raid1_read_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>       if (max_sectors < bio_sectors(bio)) {
>>>           struct bio *split = bio_split(bio, max_sectors,
>>>                             gfp, &conf->bio_split);
>>> +
>>> +        if (IS_ERR(split)) {
>>> +            error = PTR_ERR(split);
>>> +            goto err_handle;
>>> +        }
>>>           bio_chain(split, bio);
>>>           submit_bio_noacct(bio);
>>>           bio = split;
>>> @@ -1410,6 +1415,12 @@ static void raid1_read_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>       read_bio->bi_private = r1_bio;
>>>       mddev_trace_remap(mddev, read_bio, r1_bio->sector);
>>>       submit_bio_noacct(read_bio);
>>> +    return;
>>> +
>>> +err_handle:
>>> +    bio->bi_status = errno_to_blk_status(error);
>>> +    set_bit(R1BIO_Uptodate, &r1_bio->state);
>>> +    raid_end_bio_io(r1_bio);
>>>   }
>>>   static void raid1_write_request(struct mddev *mddev, struct bio *bio,
>>> @@ -1417,7 +1428,7 @@ static void raid1_write_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>   {
>>>       struct r1conf *conf = mddev->private;
>>>       struct r1bio *r1_bio;
>>> -    int i, disks;
>>> +    int i, disks, k, error;
>>>       unsigned long flags;
>>>       struct md_rdev *blocked_rdev;
>>>       int first_clone;
>>> @@ -1576,6 +1587,11 @@ static void raid1_write_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>       if (max_sectors < bio_sectors(bio)) {
>>>           struct bio *split = bio_split(bio, max_sectors,
>>>                             GFP_NOIO, &conf->bio_split);
>>> +
>>> +        if (IS_ERR(split)) {
>>> +            error = PTR_ERR(split);
>>> +            goto err_handle;
>>> +        }
>>>           bio_chain(split, bio);
>>>           submit_bio_noacct(bio);
>>>           bio = split;
>>> @@ -1660,6 +1676,18 @@ static void raid1_write_request(struct mddev 
>>> *mddev, struct bio *bio,
>>>       /* In case raid1d snuck in to freeze_array */
>>>       wake_up_barrier(conf);
>>> +    return;
>>> +err_handle:
>>> +    for (k = 0; k < i; k++) {
>>> +        if (r1_bio->bios[k]) {
>>> +            rdev_dec_pending(conf->mirrors[k].rdev, mddev);
>>> +            r1_bio->bios[k] = NULL;
>>> +        }
>>> +    }
>>> +
>>> +    bio->bi_status = errno_to_blk_status(error);
>>> +    set_bit(R1BIO_Uptodate, &r1_bio->state);
>>> +    raid_end_bio_io(r1_bio);
> 
> Hi Kuai,
> 
>>
>> Looks good that the error code is passed to the orig bio. However,
>> I really think badblocks should be handled somehow; it just doesn't make
>> sense to return an IO error to filesystems or users if one underlying disk
>> contains BBs while the others are good.
> 
> Please be aware that this change is not for handling splits in atomic 
> writes. It is for the situation when a split fails for whatever reason - 
> likely a software bug.
> 
> For when atomic writes are supported for raid1, my plan is that an 
> atomic write over a region which covers a BB will error, i.e. goto 
> err_handle, like:
> 
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -1514,6 +1514,12 @@ static void raid1_write_request(struct mddev 
> *mddev, struct bio *bio,
>                   break;
>               }
> 
> +            if (is_bad && bio->bi_opf & REQ_ATOMIC) {
> +                /* We just cannot atomically write this ... */
> +                err = -EIO;
> +                goto err_handle;
> +            }
> +
>               if (is_bad && first_bad <= r1_bio->sector) {
> 
> 
> I just think that if we try to write a region atomically which contains 
> BBs then we should error. Indeed, as I mentioned previously, I really 
> don't expect BBs on devices which support atomic writes. But we should 
> still handle it.
> 
Agreed.

> OTOH, if we did want to handle atomic writes to regions with BBs, we 
> could make a bigger effort and write the disks which don't have BBs 
> atomically (so that we don't split for those good disks). But this is 
> too complicated and does not achieve much.

Agreed.

> 
>>
>> Or is it guaranteed that an IO error from an atomic write won't hurt anyone,
>> i.e. the user will handle the error and retry with a non-atomic write?
> 
> Yes, I think that the user could retry non-atomically for the same 
> write. Maybe returning a special error code could be useful for this.

And can you update the above error path comment when you support raid1
and raid10?

Thanks,
Kuai

> 
> Thanks,
> John
> 
> .
> 

Re: [PATCH v2 6/7] md/raid1: Handle bio_split() errors
Posted by John Garry 3 weeks, 5 days ago
On 29/10/2024 11:30, Yu Kuai wrote:
>>>
>>> Or is it guaranteed that an IO error from an atomic write won't hurt anyone,
>>> i.e. the user will handle the error and retry with a non-atomic write?
>>
>> Yes, I think that the user could retry non-atomically for the same 
>> write. Maybe returning a special error code could be useful for this.
> 
> And can you update the above error path comment when you support raid1
> and raid10?

Sure, can do. I am not sure on a special error code value. I will think 
about it.

And I will send an update for 
https://lore.kernel.org/linux-raid/20240903150748.2179966-1-john.g.garry@oracle.com/T/#m5daa8d32d825d74422bbff272c9b25b6c4fc2788 
soon with this suggestion.

Thanks,
John