From: Li Nan <linan122@huawei.com>
Convert tmppage to tmpfolio and use it throughout raid1.
Signed-off-by: Li Nan <linan122@huawei.com>
---
drivers/md/raid1.h | 2 +-
drivers/md/raid1.c | 18 ++++++++++--------
2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c98d43a7ae99..d480b3a8c2c4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -101,7 +101,7 @@ struct r1conf {
/* temporary buffer to synchronous IO when attempting to repair
* a read error.
*/
- struct page *tmppage;
+ struct folio *tmpfolio;

/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 407925951299..43453f1a04f4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2417,8 +2417,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
rdev->recovery_offset >= sect + s)) &&
rdev_has_badblock(rdev, sect, s) == 0) {
atomic_inc(&rdev->nr_pending);
- if (sync_page_io(rdev, sect, s<<9,
- conf->tmppage, REQ_OP_READ, false))
+ if (sync_folio_io(rdev, sect, s<<9, 0,
+ conf->tmpfolio, REQ_OP_READ, false))
success = 1;
rdev_dec_pending(rdev, mddev);
if (success)
@@ -2447,7 +2447,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
r1_sync_page_io(rdev, sect, s,
- conf->tmppage, REQ_OP_WRITE);
+ folio_page(conf->tmpfolio, 0),
+ REQ_OP_WRITE);
rdev_dec_pending(rdev, mddev);
}
}
@@ -2461,7 +2462,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
if (r1_sync_page_io(rdev, sect, s,
- conf->tmppage, REQ_OP_READ)) {
+ folio_page(conf->tmpfolio, 0),
+ REQ_OP_READ)) {
atomic_add(s, &rdev->corrected_errors);
pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
mdname(mddev), s,
@@ -3120,8 +3122,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf->mirrors)
goto abort;

- conf->tmppage = alloc_page(GFP_KERNEL);
- if (!conf->tmppage)
+ conf->tmpfolio = folio_alloc(GFP_KERNEL, 0);
+ if (!conf->tmpfolio)
goto abort;

r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
@@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (conf) {
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
+ folio_put(conf->tmpfolio);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
@@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)

mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
- safe_put_page(conf->tmppage);
+ folio_put(conf->tmpfolio);
kfree(conf->nr_pending);
kfree(conf->nr_waiting);
kfree(conf->nr_queued);
--
2.39.2
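[Editor's note: a minimal sketch of the page-to-folio mapping this patch
relies on. The helper names below are illustrative only and not part of the
series; folio_alloc(), folio_put() and folio_page() are the upstream mm APIs
the patch itself uses.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * An order-0 folio holds exactly one page, so it is a drop-in
 * replacement for the old single tmppage buffer.
 */
static struct folio *example_alloc_tmpfolio(void)
{
	return folio_alloc(GFP_KERNEL, 0);	/* was: alloc_page(GFP_KERNEL) */
}

static void example_free_tmpfolio(struct folio *folio)
{
	/*
	 * md's safe_put_page() tolerates a NULL page, while folio_put()
	 * dereferences its argument, so error paths that can run before
	 * the allocation succeeds need an explicit guard.
	 */
	if (folio)
		folio_put(folio);	/* was: safe_put_page(page) */
}

Callers that still take a struct page *, such as r1_sync_page_io() above,
can be bridged with folio_page(conf->tmpfolio, 0) until they are converted.]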
On Wed, Dec 17, 2025 at 8:11 PM <linan666@huaweicloud.com> wrote:
> [...]
Hi Nan
Same question for patch04 and patch05: tmppage is used in the read IO path.
From the cover letter, this patch set aims to handle multiple pages in the
sync IO path. Would it be better to keep these changes for your future
patch set?
Best Regards
Xiao
On 2026/1/19 11:20, Xiao Ni wrote:
> On Wed, Dec 17, 2025 at 8:11 PM <linan666@huaweicloud.com> wrote:
>> [...]
>
> Hi Nan
>
> Same question for patch04 and patch05: tmppage is used in the read IO path.
> From the cover letter, this patch set aims to handle multiple pages in the
> sync IO path. Would it be better to keep these changes for your future
> patch set?
>
> Best Regards
> Xiao
After reading patch06, I understand now: r1_sync_page_io needs to change to
r1_sync_folio_io to handle sync read errors. Please ignore my comments
above. Patch04 and patch05 look good to me.
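
[Editor's note: for context, a rough sketch of what that conversion could
look like, modeled on the existing r1_sync_page_io() in raid1.c. The
sync_folio_io() signature below follows its use in fix_read_error() in this
patch; patch06 itself is authoritative and may differ.

static int r1_sync_folio_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct folio *folio, blk_opf_t rw)
{
	/* byte count plus an offset into the folio, as in fix_read_error() */
	if (sync_folio_io(rdev, sector, sectors << 9, 0, folio, rw, false))
		/* success */
		return 1;

	if (rw == REQ_OP_WRITE) {
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
	}
	/* record an error, either for the block or for the device */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}]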
Best Regards
Xiao
On 2026/1/20 11:38, Xiao Ni wrote:
>
> On 2026/1/19 11:20, Xiao Ni wrote:
>> [...]
>
> After reading patch06, I understand now: r1_sync_page_io needs to change
> to r1_sync_folio_io to handle sync read errors. Please ignore my comments
> above. Patch04 and patch05 look good to me.
>
> Best Regards
>
> Xiao
>
Thanks for your patient review.
--
Thanks,
Nan