From: Li Nan <linan122@huawei.com>
'need_recover' and 'mrdev' are equivalent in raid10_sync_request(), and
incrementing mrdev->nr_pending is unreasonable if recovery is not needed.
Replace 'need_recover' with 'mrdev', and only increment nr_pending when
it is needed.
Suggested-by: Yu Kuai <yukuai3@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
---
drivers/md/raid10.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e21502c03b45..9de9eabff209 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3437,7 +3437,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
- int need_recover = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3446,14 +3445,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mreplace = rcu_dereference(mirror->replacement);
if (mrdev != NULL &&
- !test_bit(Faulty, &mrdev->flags) &&
- !test_bit(In_sync, &mrdev->flags))
- need_recover = 1;
+ (test_bit(Faulty, &mrdev->flags) ||
+ test_bit(In_sync, &mrdev->flags)))
+ mrdev = NULL;
if (mreplace != NULL &&
test_bit(Faulty, &mreplace->flags))
mreplace = NULL;
- if (!need_recover && !mreplace) {
+ if (!mrdev && !mreplace) {
rcu_read_unlock();
continue;
}
@@ -3487,7 +3486,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
rcu_read_unlock();
continue;
}
- atomic_inc(&mrdev->nr_pending);
+ if (mrdev)
+ atomic_inc(&mrdev->nr_pending);
if (mreplace)
atomic_inc(&mreplace->nr_pending);
rcu_read_unlock();
@@ -3574,7 +3574,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- if (need_recover) {
+ if (mrdev) {
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
@@ -3619,7 +3619,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
- if (!test_bit(In_sync,
+ if (mrdev && !test_bit(In_sync,
&mrdev->flags)
&& !rdev_set_badblocks(
mrdev,
@@ -3645,12 +3645,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (rb2)
atomic_dec(&rb2->remaining);
r10_bio = rb2;
- rdev_dec_pending(mrdev, mddev);
+ if (mrdev)
+ rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
break;
}
- rdev_dec_pending(mrdev, mddev);
+ if (mrdev)
+ rdev_dec_pending(mrdev, mddev);
if (mreplace)
rdev_dec_pending(mreplace, mddev);
if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
--
2.31.1
在 2023/05/26 15:45, linan666@huaweicloud.com 写道: > From: Li Nan <linan122@huawei.com> > > 'need_recover' and 'mrdev' are equivalent in raid10_sync_request(), and > inc mrdev->nr_pending is unreasonable if don't need recovery. Replace > 'need_recover' with 'mrdev', and only inc nr_pending when needed. LGTM, feel free to add: Reviewed-by: Yu Kuai <yukuai3@huawei.com> > > Suggested-by: Yu Kuai <yukuai3@huawei.com> > Signed-off-by: Li Nan <linan122@huawei.com> > --- > drivers/md/raid10.c | 22 ++++++++++++---------- > 1 file changed, 12 insertions(+), 10 deletions(-) > > diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c > index e21502c03b45..9de9eabff209 100644 > --- a/drivers/md/raid10.c > +++ b/drivers/md/raid10.c > @@ -3437,7 +3437,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > sector_t sect; > int must_sync; > int any_working; > - int need_recover = 0; > struct raid10_info *mirror = &conf->mirrors[i]; > struct md_rdev *mrdev, *mreplace; > > @@ -3446,14 +3445,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > mreplace = rcu_dereference(mirror->replacement); > > if (mrdev != NULL && > - !test_bit(Faulty, &mrdev->flags) && > - !test_bit(In_sync, &mrdev->flags)) > - need_recover = 1; > + (test_bit(Faulty, &mrdev->flags) || > + test_bit(In_sync, &mrdev->flags))) > + mrdev = NULL; > if (mreplace != NULL && > test_bit(Faulty, &mreplace->flags)) > mreplace = NULL; > > - if (!need_recover && !mreplace) { > + if (!mrdev && !mreplace) { > rcu_read_unlock(); > continue; > } > @@ -3487,7 +3486,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > rcu_read_unlock(); > continue; > } > - atomic_inc(&mrdev->nr_pending); > + if (mrdev) > + atomic_inc(&mrdev->nr_pending); > if (mreplace) > atomic_inc(&mreplace->nr_pending); > rcu_read_unlock(); > @@ -3574,7 +3574,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > r10_bio->devs[1].devnum = i; > 
r10_bio->devs[1].addr = to_addr; > > - if (need_recover) { > + if (mrdev) { > bio = r10_bio->devs[1].bio; > bio->bi_next = biolist; > biolist = bio; > @@ -3619,7 +3619,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > for (k = 0; k < conf->copies; k++) > if (r10_bio->devs[k].devnum == i) > break; > - if (!test_bit(In_sync, > + if (mrdev && !test_bit(In_sync, > &mrdev->flags) > && !rdev_set_badblocks( > mrdev, > @@ -3645,12 +3645,14 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, > if (rb2) > atomic_dec(&rb2->remaining); > r10_bio = rb2; > - rdev_dec_pending(mrdev, mddev); > + if (mrdev) > + rdev_dec_pending(mrdev, mddev); > if (mreplace) > rdev_dec_pending(mreplace, mddev); > break; > } > - rdev_dec_pending(mrdev, mddev); > + if (mrdev) > + rdev_dec_pending(mrdev, mddev); > if (mreplace) > rdev_dec_pending(mreplace, mddev); > if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { >
© 2016 - 2024 Red Hat, Inc.