[PATCH v2 06/14] md: Clean up folio sync support related code

linan666@huaweicloud.com posted 14 patches 1 week, 5 days ago
[PATCH v2 06/14] md: Clean up folio sync support related code
Posted by linan666@huaweicloud.com 1 week, 5 days ago
From: Li Nan <linan122@huawei.com>

1. Remove resync_get_folio() and invoke folio_get() directly instead.
2. Clean up redundant while(0) loop in md_bio_reset_resync_folio().
3. Clean up bio variable by directly referencing r10_bio->devs[j].bio
   instead in r1buf_pool_alloc() and r10buf_pool_alloc().
4. Clean up RESYNC_PAGES.

Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
---
 drivers/md/raid1-10.c | 22 ++++++----------------
 drivers/md/raid1.c    |  6 ++----
 drivers/md/raid10.c   |  6 ++----
 3 files changed, 10 insertions(+), 24 deletions(-)

diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 300fbe9dc02e..568ab002691f 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Maximum size of each resync request */
 #define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 
 /*
@@ -56,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio *rf)
 	folio_put(rf->folio);
 }
 
-static inline void resync_get_folio(struct resync_folio *rf)
-{
-	folio_get(rf->folio);
-}
-
 static inline struct folio *resync_fetch_folio(struct resync_folio *rf)
 {
 	return rf->folio;
@@ -80,16 +74,12 @@ static void md_bio_reset_resync_folio(struct bio *bio, struct resync_folio *rf,
 			       int size)
 {
 	/* initialize bvec table again */
-	do {
-		struct folio *folio = resync_fetch_folio(rf);
-		int len = min_t(int, size, RESYNC_BLOCK_SIZE);
-
-		if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) {
-			bio->bi_status = BLK_STS_RESOURCE;
-			bio_endio(bio);
-			return;
-		}
-	} while (0);
+	if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf),
+				   min_t(int, size, RESYNC_BLOCK_SIZE),
+				   0))) {
+		bio->bi_status = BLK_STS_RESOURCE;
+		bio_endio(bio);
+	}
 }
 
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d9c106529289..5954ead7dfd4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -181,18 +181,16 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	for (j = 0; j < conf->raid_disks * 2; j++) {
 		struct resync_folio *rf = &rfs[j];
 
-		bio = r1_bio->bios[j];
-
 		if (j < need_folio) {
 			if (resync_alloc_folio(rf, gfp_flags))
 				goto out_free_folio;
 		} else {
 			memcpy(rf, &rfs[0], sizeof(*rf));
-			resync_get_folio(rf);
+			folio_get(rf->folio);
 		}
 
 		rf->raid_bio = r1_bio;
-		bio->bi_private = rf;
+		r1_bio->bios[j]->bi_private = rf;
 	}
 
 	r1_bio->master_bio = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7533aeb23819..5c0975ec8809 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -183,19 +183,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 		if (rbio)
 			rf_repl = &rfs[nalloc + j];
 
-		bio = r10_bio->devs[j].bio;
-
 		if (!j || test_bit(MD_RECOVERY_SYNC,
 				   &conf->mddev->recovery)) {
 			if (resync_alloc_folio(rf, gfp_flags))
 				goto out_free_folio;
 		} else {
 			memcpy(rf, &rfs[0], sizeof(*rf));
-			resync_get_folio(rf);
+			folio_get(rf->folio);
 		}
 
 		rf->raid_bio = r10_bio;
-		bio->bi_private = rf;
+		r10_bio->devs[j].bio->bi_private = rf;
 		if (rbio) {
 			memcpy(rf_repl, rf, sizeof(*rf));
 			rbio->bi_private = rf_repl;
-- 
2.39.2
Re: [PATCH v2 06/14] md: Clean up folio sync support related code
Posted by Yu Kuai 5 days, 5 hours ago
Hi,

在 2026/1/28 15:57, linan666@huaweicloud.com 写道:
> From: Li Nan <linan122@huawei.com>
>
> 1. Remove resync_get_folio() and invoke folio_get() directly instead.
> 2. Clean up redundant while(0) loop in md_bio_reset_resync_folio().
> 3. Clean up bio variable by directly referencing r10_bio->devs[j].bio
>     instead in r1buf_pool_alloc() and r10buf_pool_alloc().
> 4. Clean up RESYNC_PAGES.
>
> Signed-off-by: Li Nan <linan122@huawei.com>
> Reviewed-by: Xiao Ni <xni@redhat.com>
> ---
>   drivers/md/raid1-10.c | 22 ++++++----------------
>   drivers/md/raid1.c    |  6 ++----
>   drivers/md/raid10.c   |  6 ++----
>   3 files changed, 10 insertions(+), 24 deletions(-)

I think this patch can be merged into patch 5.

> diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
> index 300fbe9dc02e..568ab002691f 100644
> --- a/drivers/md/raid1-10.c
> +++ b/drivers/md/raid1-10.c
> @@ -1,7 +1,6 @@
>   // SPDX-License-Identifier: GPL-2.0
>   /* Maximum size of each resync request */
>   #define RESYNC_BLOCK_SIZE (64*1024)
> -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
>   #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
>   
>   /*
> @@ -56,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio *rf)
>   	folio_put(rf->folio);
>   }
>   
> -static inline void resync_get_folio(struct resync_folio *rf)
> -{
> -	folio_get(rf->folio);
> -}
> -
>   static inline struct folio *resync_fetch_folio(struct resync_folio *rf)
>   {
>   	return rf->folio;
> @@ -80,16 +74,12 @@ static void md_bio_reset_resync_folio(struct bio *bio, struct resync_folio *rf,
>   			       int size)
>   {
>   	/* initialize bvec table again */
> -	do {
> -		struct folio *folio = resync_fetch_folio(rf);
> -		int len = min_t(int, size, RESYNC_BLOCK_SIZE);
> -
> -		if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) {
> -			bio->bi_status = BLK_STS_RESOURCE;
> -			bio_endio(bio);
> -			return;
> -		}
> -	} while (0);
> +	if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf),
> +				   min_t(int, size, RESYNC_BLOCK_SIZE),
> +				   0))) {
> +		bio->bi_status = BLK_STS_RESOURCE;
> +		bio_endio(bio);
> +	}
>   }
>   
>   
> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index d9c106529289..5954ead7dfd4 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -181,18 +181,16 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
>   	for (j = 0; j < conf->raid_disks * 2; j++) {
>   		struct resync_folio *rf = &rfs[j];
>   
> -		bio = r1_bio->bios[j];
> -
>   		if (j < need_folio) {
>   			if (resync_alloc_folio(rf, gfp_flags))
>   				goto out_free_folio;
>   		} else {
>   			memcpy(rf, &rfs[0], sizeof(*rf));
> -			resync_get_folio(rf);
> +			folio_get(rf->folio);
>   		}
>   
>   		rf->raid_bio = r1_bio;
> -		bio->bi_private = rf;
> +		r1_bio->bios[j]->bi_private = rf;
>   	}
>   
>   	r1_bio->master_bio = NULL;
> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
> index 7533aeb23819..5c0975ec8809 100644
> --- a/drivers/md/raid10.c
> +++ b/drivers/md/raid10.c
> @@ -183,19 +183,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
>   		if (rbio)
>   			rf_repl = &rfs[nalloc + j];
>   
> -		bio = r10_bio->devs[j].bio;
> -
>   		if (!j || test_bit(MD_RECOVERY_SYNC,
>   				   &conf->mddev->recovery)) {
>   			if (resync_alloc_folio(rf, gfp_flags))
>   				goto out_free_folio;
>   		} else {
>   			memcpy(rf, &rfs[0], sizeof(*rf));
> -			resync_get_folio(rf);
> +			folio_get(rf->folio);
>   		}
>   
>   		rf->raid_bio = r10_bio;
> -		bio->bi_private = rf;
> +		r10_bio->devs[j].bio->bi_private = rf;
>   		if (rbio) {
>   			memcpy(rf_repl, rf, sizeof(*rf));
>   			rbio->bi_private = rf_repl;

-- 
Thanks,
Kuai
Re: [PATCH v2 06/14] md: Clean up folio sync support related code
Posted by Li Nan 4 days, 15 hours ago

在 2026/2/5 0:52, Yu Kuai 写道:
> Hi,
> 
> 在 2026/1/28 15:57, linan666@huaweicloud.com 写道:
>> From: Li Nan <linan122@huawei.com>
>>
>> 1. Remove resync_get_folio() and invoke folio_get() directly instead.
>> 2. Clean up redundant while(0) loop in md_bio_reset_resync_folio().
>> 3. Clean up bio variable by directly referencing r10_bio->devs[j].bio
>>      instead in r1buf_pool_alloc() and r10buf_pool_alloc().
>> 4. Clean up RESYNC_PAGES.
>>
>> Signed-off-by: Li Nan <linan122@huawei.com>
>> Reviewed-by: Xiao Ni <xni@redhat.com>
>> ---
>>    drivers/md/raid1-10.c | 22 ++++++----------------
>>    drivers/md/raid1.c    |  6 ++----
>>    drivers/md/raid10.c   |  6 ++----
>>    3 files changed, 10 insertions(+), 24 deletions(-)
> 
> I think this patch can be merged into patch 5.
> 

Thanks for your review.

Previously I modified them together, which made patch 5 difficult to review
as it is already large enough. I will merge them in the next version.

-- 
Thanks,
Nan