[PATCH v2 03/14] md/raid1: use folio for tmppage

Posted by linan666@huaweicloud.com 1 week, 5 days ago
From: Li Nan <linan122@huawei.com>

Convert tmppage to tmpfolio and use it throughout raid1.

Signed-off-by: Li Nan <linan122@huawei.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
---
 drivers/md/raid1.h |  2 +-
 drivers/md/raid1.c | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c98d43a7ae99..d480b3a8c2c4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -101,7 +101,7 @@ struct r1conf {
 	/* temporary buffer to synchronous IO when attempting to repair
 	 * a read error.
 	 */
-	struct page		*tmppage;
+	struct folio		*tmpfolio;
 
 	/* When taking over an array from a different personality, we store
 	 * the new thread here until we fully activate the array.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 407925951299..43453f1a04f4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2417,8 +2417,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			      rdev->recovery_offset >= sect + s)) &&
 			    rdev_has_badblock(rdev, sect, s) == 0) {
 				atomic_inc(&rdev->nr_pending);
-				if (sync_page_io(rdev, sect, s<<9,
-					 conf->tmppage, REQ_OP_READ, false))
+				if (sync_folio_io(rdev, sect, s<<9, 0,
+					 conf->tmpfolio, REQ_OP_READ, false))
 					success = 1;
 				rdev_dec_pending(rdev, mddev);
 				if (success)
@@ -2447,7 +2447,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				r1_sync_page_io(rdev, sect, s,
-						conf->tmppage, REQ_OP_WRITE);
+						folio_page(conf->tmpfolio, 0),
+						REQ_OP_WRITE);
 				rdev_dec_pending(rdev, mddev);
 			}
 		}
@@ -2461,7 +2462,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				if (r1_sync_page_io(rdev, sect, s,
-						conf->tmppage, REQ_OP_READ)) {
+						folio_page(conf->tmpfolio, 0),
+						REQ_OP_READ)) {
 					atomic_add(s, &rdev->corrected_errors);
 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
 						mdname(mddev), s,
@@ -3120,8 +3122,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->mirrors)
 		goto abort;
 
-	conf->tmppage = alloc_page(GFP_KERNEL);
-	if (!conf->tmppage)
+	conf->tmpfolio = folio_alloc(GFP_KERNEL, 0);
+	if (!conf->tmpfolio)
 		goto abort;
 
 	r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
@@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (conf) {
 		mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
-		safe_put_page(conf->tmppage);
+		folio_put(conf->tmpfolio);
 		kfree(conf->nr_pending);
 		kfree(conf->nr_waiting);
 		kfree(conf->nr_queued);
@@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 
 	mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
-	safe_put_page(conf->tmppage);
+	folio_put(conf->tmpfolio);
 	kfree(conf->nr_pending);
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
-- 
2.39.2
Re: [PATCH v2 03/14] md/raid1: use folio for tmppage
Posted by Yu Kuai 5 days, 5 hours ago
Hi,

On 2026/1/28 15:56, linan666@huaweicloud.com wrote:
> From: Li Nan <linan122@huawei.com>
>
> Convert tmppage to tmpfolio and use it throughout raid1.
>
[...]
> @@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
>   	if (conf) {
>   		mempool_destroy(conf->r1bio_pool);
>   		kfree(conf->mirrors);
> -		safe_put_page(conf->tmppage);
> +		folio_put(conf->tmpfolio);

Is this safe? folio_put() can't be called with NULL.

>   		kfree(conf->nr_pending);
>   		kfree(conf->nr_waiting);
>   		kfree(conf->nr_queued);
> @@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
>   
>   	mempool_destroy(conf->r1bio_pool);
>   	kfree(conf->mirrors);
> -	safe_put_page(conf->tmppage);
> +	folio_put(conf->tmpfolio);

Same here.

-- 
Thanks,
Kuai
Re: [PATCH v2 03/14] md/raid1: use folio for tmppage
Posted by Li Nan 4 days, 15 hours ago

On 2026/2/5 0:45, Yu Kuai wrote:
> Hi,
> 
> On 2026/1/28 15:56, linan666@huaweicloud.com wrote:
[...]
>> @@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
>>    	if (conf) {
>>    		mempool_destroy(conf->r1bio_pool);
>>    		kfree(conf->mirrors);
>> -		safe_put_page(conf->tmppage);
>> +		folio_put(conf->tmpfolio);
> 
> Is this safe? folio_put() can't be called with NULL.
> 

Yeah, should we introduce safe_put_folio()? Or just check NULL here.
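
Checking for NULL at the call site would be something like (just a
sketch):

	if (conf->tmpfolio)
		folio_put(conf->tmpfolio);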

>>    		kfree(conf->nr_pending);
>>    		kfree(conf->nr_waiting);
>>    		kfree(conf->nr_queued);
>> @@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
>>    
>>    	mempool_destroy(conf->r1bio_pool);
>>    	kfree(conf->mirrors);
>> -	safe_put_page(conf->tmppage);
>> +	folio_put(conf->tmpfolio);
> 
> Same here.
> 

In raid1_free(), setup_conf() has already succeeded, so conf->tmpfolio
cannot be NULL. It is safe there.

-- 
Thanks,
Nan

Re: [PATCH v2 03/14] md/raid1: use folio for tmppage
Posted by Yu Kuai 4 days, 14 hours ago
Hi,

On 2026/2/5 15:23, Li Nan wrote:
> Yeah, should we introduce safe_put_folio()? Or just check NULL here.

Yes, and don't copy the implementation. Just convert safe_put_page(page)
to safe_put_folio(page_folio(page)) first.
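
Something like this in md.h, as a sketch (safe_put_folio() does not
exist yet, so the name and placement are only a suggestion):

	static inline void safe_put_folio(struct folio *folio)
	{
		/* folio_put() itself does not accept NULL */
		if (folio)
			folio_put(folio);
	}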

-- 
Thanks,
Kuai
Re: [PATCH v2 03/14] md/raid1: use folio for tmppage
Posted by Li Nan 3 days, 13 hours ago

On 2026/2/5 15:33, Yu Kuai wrote:
> Hi,
> 
> On 2026/2/5 15:23, Li Nan wrote:
>> Yeah, should we introduce safe_put_folio()? Or just check NULL here.
> 
> Yes, and don't copy implementation. Just convert safe_put_page(page) to
> safe_put_folio(page_folio(page)) first.
> 

page cannot be NULL when passed to page_folio(), so the code would be:

	if (page)
		safe_put_folio(page_folio(page));

This also looks odd. Keeping both helpers, and removing the page variant
once its last user in raid5 is gone, seems better. What do you think?
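
That is, keep the current page helper in md.h, which is essentially:

	static inline void safe_put_page(struct page *p)
	{
		if (p)
			put_page(p);
	}

add safe_put_folio() next to it, and delete safe_put_page() once the
last raid5 user is converted.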

-- 
Thanks,
Nan