From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AE6D63382F9; Wed, 28 Jan 2026 08:06:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587579; cv=none; b=NkubgcsJUI/qGo94ttwPmAuddaMNGIVeg9SOXxfwSp/GsCDPJMPMBz2yFg6ylcL1uuH0STo9dUuqTu+dllSXr1RMAp4+x8lV88yC1xzEuTz/sV+wnQnlTi56ENbae7qB25DHGoiZKOp91nJNHoI7ZTV5P2D2AELzRJdhMc0yfBs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587579; c=relaxed/simple; bh=Z81F5faV8Go/v42IbDRj5OpLIq2tSbyJukEKkImbA/M=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=ML7CIUc/ys31vDz99laAo2vSxKL9CWR1IMtoPFm21Z+dlmMClCPPFGe4XeDN//zL+PN+RqNyJCriBwQclT3GLv1nEv5YfxUslZ4F0/Zs2by7N7brlT7/kQTCbAl0/UKXrpkd0+DuTQm16EJN0qg5sty9qf1ZLXdf7M5FbaG3N7k= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY0bg0zYQtxM; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 08AF64056F; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S5; Wed, 28 Jan 2026 16:06:13 +0800 (CST) From: 
linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 01/14] md/raid1,raid10: clean up of RESYNC_SECTORS Date: Wed, 28 Jan 2026 15:56:55 +0800 Message-Id: <20260128075708.2259525-2-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S5 X-Coremail-Antispam: 1UD129KBjvJXoWxJr4rtF4rJFWUZryDKrWrGrg_yoW5JFy8pa 1DGrySvw45KF47Jas7JayUua1Yy3Zrt3yUCrn5Za95uFy3XrZrXrWjqayYgF1DXFn5tFy2 q3WDCr4UZFy3taUanT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUmC14x267AKxVW5JVWrJwAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_Jr4l82xGYIkIc2 x26xkF7I0E14v26r1I6r4UM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2z4x0 Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJw A2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq3wAa c4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0VAKzV Aqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4UJwAm 72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20VAGYx C7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCFx2Iq xVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14v26r 106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY67AK xVWUJVWUCwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr0_Cr1lIxAIcVCF04k26cxKx2IYs7 xG6r1j6r1xMIIF0xvEx4A2jsIE14v26r1j6r4UMIIF0xvEx4A2jsIEc7CjxVAFwI0_Gr0_ Gr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUfCzNUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: 
text/plain; charset="utf-8" From: Li Nan Move redundant RESYNC_SECTORS definition from raid1 and raid10 implementations to raid1-10.c. Simplify max_sync assignment in raid10_sync_request(). No functional changes. Signed-off-by: Li Nan Reviewed-by: Yu Kuai --- drivers/md/raid1-10.c | 1 + drivers/md/raid1.c | 1 - drivers/md/raid10.c | 4 +--- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 521625756128..260d7fd7ccbe 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -2,6 +2,7 @@ /* Maximum size of each resync request */ #define RESYNC_BLOCK_SIZE (64*1024) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) +#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) =20 /* * Number of guaranteed raid bios in case of extreme VM load: diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 00120c86c443..407925951299 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -136,7 +136,6 @@ static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r= 1conf *conf) } =20 #define RESYNC_DEPTH 32 -#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH) #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9) #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1adad768e277..1e57d9ce98e7 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -113,7 +113,6 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *= data) return kzalloc(size, gfp_flags); } =20 -#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) /* amount of memory to reserve for resync requests */ #define RESYNC_WINDOW (1024*1024) /* maximum number of concurrent requests, memory permitting */ @@ -3171,7 +3170,7 @@ static sector_t raid10_sync_request(struct mddev *mdd= ev, sector_t sector_nr, struct bio *biolist =3D NULL, *bio; sector_t nr_sectors; int i; - int max_sync; + int max_sync =3D RESYNC_SECTORS; sector_t 
sync_blocks; sector_t chunk_mask =3D conf->geo.chunk_mask; int page_idx =3D 0; @@ -3284,7 +3283,6 @@ static sector_t raid10_sync_request(struct mddev *mdd= ev, sector_t sector_nr, * end_sync_write if we will want to write. */ =20 - max_sync =3D RESYNC_PAGES << (PAGE_SHIFT-9); if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { /* recovery... the complicated one */ int j; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 96CEF33ADA4; Wed, 28 Jan 2026 08:06:21 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587584; cv=none; b=f0H7HKWhO1iI0F81ksq9VLkN+CH792q62m84/41xVrQFQMQAArn2Olkd77pX2GsdbIQ33uktcAe//2Hrb8aFHLBFAzW9MZ5p44q/ns5GiHDUYR638M5ic4jwdSQOUZNj+m8KcjjOkJEP3YUvW70tUvbIGT7MDkzZPHFQfYef8NA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587584; c=relaxed/simple; bh=SdcuBczE9qsYacUWY9nQj3tapNX9F04YrXw7xwOJ9L8=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=k161oajkiZ/NFhG6i6jsvPa/PMJ1/IeSXZRn/6RzMKEtZXXE872b1roO7TjjPzTaGhIFkEjl4FbRkiJjr4q+tgZYNQMialUsa435Si1SKVqZzhh8rtuSuldF1y+lGxokCiwgRcl4JPbWBdHmuhjMo/lKHxOngH+hktsd/ew12P4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.177]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 
4f1FG16tnlzKHMl0; Wed, 28 Jan 2026 16:06:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 2A79040592; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S6; Wed, 28 Jan 2026 16:06:13 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 02/14] md: introduce sync_folio_io for folio support in RAID Date: Wed, 28 Jan 2026 15:56:56 +0800 Message-Id: <20260128075708.2259525-3-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S6 X-Coremail-Antispam: 1UD129KBjvJXoWxXw4DXF1xCF4UCF4xJFWfGrg_yoW5Gr1fpa 4jkF9xG3y5Zw42gw13JFs7Ca4Sq34IgrWUtryfuayfW3W7KryDKF45tF1jvF98GF98CF4x t34jgay5urn5Wr7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUmC14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_Jryl82xGYIkIc2 x26xkF7I0E14v26r4j6ryUM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2z4x0 Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJw A2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq3wAa c4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0VAKzV Aqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4UJwAm 72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20VAGYx C7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCFx2Iq 
xVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14v26r 106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY67AK xVWUJVWUCwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr0_Cr1lIxAIcVCF04k26cxKx2IYs7 xG6r1j6r1xMIIF0xvEx4A2jsIE14v26r1j6r4UMIIF0xvEx4A2jsIEc7CjxVAFwI0_Gr0_ Gr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUFD73UUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Prepare for folio support in RAID by introducing sync_folio_io(), matching sync_page_io()'s functionality. Differences are: - Replace input parameter 'page' with 'folio' - Replace __bio_add_page() calls with bio_add_folio_nofail() - Add new parameter 'off' to prepare for adding a folio to bio in segments, e.g. in fix_recovery_read_error() sync_page_io() will be removed once full folio support is complete. Signed-off-by: Li Nan --- drivers/md/md.h | 2 ++ drivers/md/md.c | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/md/md.h b/drivers/md/md.h index a083f37374d0..410f8a6b75e7 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -920,6 +920,8 @@ void md_write_metadata(struct mddev *mddev, struct md_r= dev *rdev, extern int md_super_wait(struct mddev *mddev); extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, blk_opf_t opf, bool metadata_op); +extern int sync_folio_io(struct md_rdev *rdev, sector_t sector, int size, + int off, struct folio *folio, blk_opf_t opf, bool metadata_op); extern void md_do_sync(struct md_thread *thread); extern void md_new_event(void); extern void md_allow_write(struct mddev *mddev); diff --git a/drivers/md/md.c b/drivers/md/md.c index 5df2220b1bd1..b8c8a16cf037 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1192,6 +1192,33 @@ int sync_page_io(struct md_rdev *rdev, sector_t sect= or, int size, } EXPORT_SYMBOL_GPL(sync_page_io); =20 +int sync_folio_io(struct md_rdev *rdev, sector_t sector, int size, int off, + struct folio *folio, 
blk_opf_t opf, bool metadata_op) +{ + struct bio bio; + struct bio_vec bvec; + + if (metadata_op && rdev->meta_bdev) + bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); + else + bio_init(&bio, rdev->bdev, &bvec, 1, opf); + + if (metadata_op) + bio.bi_iter.bi_sector =3D sector + rdev->sb_start; + else if (rdev->mddev->reshape_position !=3D MaxSector && + (rdev->mddev->reshape_backwards =3D=3D + (sector >=3D rdev->mddev->reshape_position))) + bio.bi_iter.bi_sector =3D sector + rdev->new_data_offset; + else + bio.bi_iter.bi_sector =3D sector + rdev->data_offset; + bio_add_folio_nofail(&bio, folio, size, off); + + submit_bio_wait(&bio); + + return !bio.bi_status; +} +EXPORT_SYMBOL_GPL(sync_folio_io); + static int read_disk_sb(struct md_rdev *rdev, int size) { if (rdev->sb_loaded) --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AE5B6337BB0; Wed, 28 Jan 2026 08:06:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; cv=none; b=njThAt1WZjy3mCR/gYwSUhZCWFrwwQr5nsv0CyMNaYFJo5CvochwkSuCH9pmnf7cxCtAnd5orLfOR64VEA/OgwpxCkiD07GXR+oW3E2h7rcvv4kSM+oHNxUFOPofBh+aVYLpNaqA3+FepjXx3XhWf/neNF74fVnmT8LqoKjGaZA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; c=relaxed/simple; bh=uPbGkRkdQ/Qs9Cac59z9ZZlpwoX+7kYt4639x6+FwnY=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=jS/15ee+JGs0jFuwUu6p3ylHxbtdF2lViJAJjOMgN35xccQr6uPU5x1rUjcAsa0K0YBosxbbb4DDokEluQ80DfYvSbupEWINf+Ut2ABhCm/0r/1MMNaWkEqjjhtVvyU/HPMgf07VHLBW2M+pfehBPzeUt5qIBUFrN/nIrYXeBRc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) 
header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.177]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY1md9zYQv07; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 33FE74058D; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S7; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 03/14] md/raid1: use folio for tmppage Date: Wed, 28 Jan 2026 15:56:57 +0800 Message-Id: <20260128075708.2259525-4-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S7 X-Coremail-Antispam: 1UD129KBjvJXoWxZF48CF1rKrW5Jw4fGFWfZrb_yoW5CFWUpa n8G3Z5tr4UJr98JryDJFWkua4Sgw1xKayjkFZrG3yS9FsaqF95ZayYk34jgr1DXF98Ja4x XFZ8trW3ZF1rtF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUmC14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JrWl82xGYIkIc2 x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2z4x0 
Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJw A2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq3wAa c4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0VAKzV Aqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4UJwAm 72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20VAGYx C7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCFx2Iq xVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14v26r 106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY67AK xVWUJVWUCwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr0_Cr1lIxAIcVCF04k26cxKx2IYs7 xG6r1j6r1xMIIF0xvEx4A2jsIE14v26r1j6r4UMIIF0xvEx4A2jsIEc7CjxVAFwI0_Gr0_ Gr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUB89NUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Convert tmppage to tmpfolio and use it throughout in raid1. Signed-off-by: Li Nan Reviewed-by: Xiao Ni --- drivers/md/raid1.h | 2 +- drivers/md/raid1.c | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c98d43a7ae99..d480b3a8c2c4 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -101,7 +101,7 @@ struct r1conf { /* temporary buffer to synchronous IO when attempting to repair * a read error. */ - struct page *tmppage; + struct folio *tmpfolio; =20 /* When taking over an array from a different personality, we store * the new thread here until we fully activate the array. 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 407925951299..43453f1a04f4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2417,8 +2417,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) rdev->recovery_offset >=3D sect + s)) && rdev_has_badblock(rdev, sect, s) =3D=3D 0) { atomic_inc(&rdev->nr_pending); - if (sync_page_io(rdev, sect, s<<9, - conf->tmppage, REQ_OP_READ, false)) + if (sync_folio_io(rdev, sect, s<<9, 0, + conf->tmpfolio, REQ_OP_READ, false)) success =3D 1; rdev_dec_pending(rdev, mddev); if (success) @@ -2447,7 +2447,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); r1_sync_page_io(rdev, sect, s, - conf->tmppage, REQ_OP_WRITE); + folio_page(conf->tmpfolio, 0), + REQ_OP_WRITE); rdev_dec_pending(rdev, mddev); } } @@ -2461,7 +2462,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); if (r1_sync_page_io(rdev, sect, s, - conf->tmppage, REQ_OP_READ)) { + folio_page(conf->tmpfolio, 0), + REQ_OP_READ)) { atomic_add(s, &rdev->corrected_errors); pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg= )\n", mdname(mddev), s, @@ -3120,8 +3122,8 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf->mirrors) goto abort; =20 - conf->tmppage =3D alloc_page(GFP_KERNEL); - if (!conf->tmppage) + conf->tmpfolio =3D folio_alloc(GFP_KERNEL, 0); + if (!conf->tmpfolio) goto abort; =20 r1bio_size =3D offsetof(struct r1bio, bios[mddev->raid_disks * 2]); @@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (conf) { mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); - safe_put_page(conf->tmppage); + folio_put(conf->tmpfolio); kfree(conf->nr_pending); kfree(conf->nr_waiting); kfree(conf->nr_queued); @@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *pri= v) =20 
mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); - safe_put_page(conf->tmppage); + folio_put(conf->tmpfolio); kfree(conf->nr_pending); kfree(conf->nr_waiting); kfree(conf->nr_queued); --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AE632337BB5; Wed, 28 Jan 2026 08:06:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587579; cv=none; b=iuNt2M5upefTi9M18fSS6zAnYcDSx4OnlHArbKtthRPdz5Gjt/ph9Sm19DYbtC7Rcwe50JpgG1F/vWpKYG1YqiQEfJ9i6GyG9FvQwsf1OlhsSunag7ivf+vJyfl5EFl0Nx19/qkpX8EA38Ksa4dSypES6Ju7kWlHF34IRi2yHx4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587579; c=relaxed/simple; bh=pAjD63mRAWWvzH6X+Se7BDY3sx5axMN3Xu035H+ssTA=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=L6BqgPLKW4STYzy++Gpn1W4twnogVoAg8BJpC+5GgjaDlCpEjGOiRJJhrZLmHuSjmBVmUdZ9+iHVwXACA2lcUVphYW4VCw+nXwsadvRhfEMwNJtonQczsWsGjrlvW9+4uSVN4DV9gkwq2ynjc3U9FPvJlCwdOuS/Oni4ar1DwgI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY2WpRzYQv0Z; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 4C0A34056B; Wed, 28 Jan 2026 
16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S8; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 04/14] md/raid10: use folio for tmppage Date: Wed, 28 Jan 2026 15:56:58 +0800 Message-Id: <20260128075708.2259525-5-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S8 X-Coremail-Antispam: 1UD129KBjvJXoWxAr18Kr1xGF1fCw13Gr18uFg_yoW5Kw4fpa 1DGasIyrWUJw43Xw1DJayDC3WrK34SkFWUtrZ7W3yfua1ftr95K3WUJ3yjgFyDXF98JF1x XFW5XrW3u3Z7tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQj14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUJVWUCwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr0_Cr1lIxAIcVCF04k26cxKx2 
IYs7xG6r1j6r1xMIIF0xvEx4A2jsIE14v26r1j6r4UMIIF0xvEx4A2jsIEc7CjxVAFwI0_ Gr0_Gr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Convert tmppage to tmpfolio and use it throughout in raid10. Signed-off-by: Li Nan Reviewed-by: Xiao Ni --- drivers/md/raid10.h | 2 +- drivers/md/raid10.c | 37 +++++++++++++++++++------------------ 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index ec79d87fb92f..19f37439a4e2 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -89,7 +89,7 @@ struct r10conf { =20 mempool_t r10bio_pool; mempool_t r10buf_pool; - struct page *tmppage; + struct folio *tmpfolio; struct bio_set bio_split; =20 /* When taking over an array from a different personality, we store diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1e57d9ce98e7..09238dc9cde6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2581,13 +2581,13 @@ static void recovery_request_write(struct mddev *md= dev, struct r10bio *r10_bio) } } =20 -static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector, - int sectors, struct page *page, enum req_op op) +static int r10_sync_folio_io(struct md_rdev *rdev, sector_t sector, + int sectors, struct folio *folio, enum req_op op) { if (rdev_has_badblock(rdev, sector, sectors) && (op =3D=3D REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags))) return -1; - if (sync_page_io(rdev, sector, sectors << 9, page, op, false)) + if (sync_folio_io(rdev, sector, sectors << 9, 0, folio, op, false)) /* success */ return 1; if (op =3D=3D REQ_OP_WRITE) { @@ -2650,12 +2650,13 @@ static void fix_read_error(struct r10conf *conf, st= ruct mddev *mddev, struct r10 r10_bio->devs[sl].addr + sect, s) =3D=3D 0) { atomic_inc(&rdev->nr_pending); - success =3D sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s<<9, - conf->tmppage, - REQ_OP_READ, false); + success =3D sync_folio_io(rdev, 
+ r10_bio->devs[sl].addr + + sect, + s<<9, + 0, + conf->tmpfolio, + REQ_OP_READ, false); rdev_dec_pending(rdev, mddev); if (success) break; @@ -2698,10 +2699,10 @@ static void fix_read_error(struct r10conf *conf, st= ruct mddev *mddev, struct r10 continue; =20 atomic_inc(&rdev->nr_pending); - if (r10_sync_page_io(rdev, - r10_bio->devs[sl].addr + - sect, - s, conf->tmppage, REQ_OP_WRITE) + if (r10_sync_folio_io(rdev, + r10_bio->devs[sl].addr + + sect, + s, conf->tmpfolio, REQ_OP_WRITE) =3D=3D 0) { /* Well, this device is dead */ pr_notice("md/raid10:%s: read correction write failed (%d sectors at %= llu on %pg)\n", @@ -2730,10 +2731,10 @@ static void fix_read_error(struct r10conf *conf, st= ruct mddev *mddev, struct r10 continue; =20 atomic_inc(&rdev->nr_pending); - switch (r10_sync_page_io(rdev, + switch (r10_sync_folio_io(rdev, r10_bio->devs[sl].addr + sect, - s, conf->tmppage, REQ_OP_READ)) { + s, conf->tmpfolio, REQ_OP_READ)) { case 0: /* Well, this device is dead */ pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sec= tors at %llu on %pg)\n", @@ -3841,7 +3842,7 @@ static void raid10_free_conf(struct r10conf *conf) kfree(conf->mirrors); kfree(conf->mirrors_old); kfree(conf->mirrors_new); - safe_put_page(conf->tmppage); + folio_put(conf->tmpfolio); bioset_exit(&conf->bio_split); kfree(conf); } @@ -3879,8 +3880,8 @@ static struct r10conf *setup_conf(struct mddev *mddev) if (!conf->mirrors) goto out; =20 - conf->tmppage =3D alloc_page(GFP_KERNEL); - if (!conf->tmppage) + conf->tmpfolio =3D folio_alloc(GFP_KERNEL, 0); + if (!conf->tmpfolio) goto out; =20 conf->geo =3D geo; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AE50E337B81; Wed, 28 Jan 2026 08:06:16 +0000 (UTC) Authentication-Results: 
smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; cv=none; b=ZlGWoG6X5tXmZ3ojCPZGjXSpZZbM1Ip5hu3qYdBJ33T2+e/0Dteogd7BwJ4MgsTIVFJXsR8rxvn8sxNOvos7OdOQJu5TmwIQ+1RnBUgcsi3zAYubd0/U/GCsOXnsv0JtgRskeJ7IThWR3NLJ8unnSrXdw0awcPJnH8VU+D8OcDw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; c=relaxed/simple; bh=NH2nuKU/lF/0yIGYleLfgzoAlBVTO0Wk+yBBtj+0hsU=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=VIwaSrDHOZQWogcfhB7kwoZa8hYpE2nY5652oldin4u1icF6EnqK9aEp471/AdwpeWAEjI6jDcE+DX/275zLINtwEnsxBj+qFoyiq152E4i9aRAv/t8aEiyR7lLCnHUegwWwmqXI6wi1uY71tZLKwQGy7kkSbMHtUfG2zb4oHpw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY3NC0zYQv0Z; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 64F3340539; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S9; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 05/14] md/raid1,raid10: use folio for sync path IO Date: Wed, 28 Jan 2026 15:56:59 +0800 Message-Id: 
<20260128075708.2259525-6-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S9 X-Coremail-Antispam: 1UD129KBjvAXoWfCFWkJryxtr4DWF48WFyUJrb_yoW5AFy3Go Z3Jr4Sk3WrWr1rurWktr1xtFsrWan8Zw1fJF1xCrWqvFsruw15Kw47Jry5XrW2qF4aqF4a kr9agw1fXFZ2vr1xn29KB7ZKAUJUUUU8529EdanIXcx71UUUUU7v73VFW2AGmfu7bjvjm3 AaLaJ3UjIYCTnIWjp_UUUOD7AC8VAFwI0_Wr0E3s1l1xkIjI8I6I8E6xAIw20EY4v20xva j40_Wr0E3s1l1IIY67AEw4v_Jr0_Jr4l82xGYIkIc2x26280x7IE14v26r126s0DM28Irc Ia0xkI8VCY1x0267AKxVW5JVCq3wA2ocxC64kIII0Yj41l84x0c7CEw4AK67xGY2AK021l 84ACjcxK6xIIjxv20xvE14v26w1j6s0DM28EF7xvwVC0I7IYx2IY6xkF7I0E14v26r4UJV WxJr1l84ACjcxK6I8E87Iv67AKxVW0oVCq3wA2z4x0Y4vEx4A2jsIEc7CjxVAFwI0_GcCE 3s1lnxkEFVAIw20F6cxK64vIFxWle2I262IYc4CY6c8Ij28IcVAaY2xG8wAqx4xG64xvF2 IEw4CE5I8CrVC2j2WlYx0E2Ix0cI8IcVAFwI0_Jrv_JF1lYx0Ex4A2jsIE14v26r4UJVWx Jr1lOx8S6xCaFVCjc4AY6r1j6r4UM4x0Y48IcxkI7VAKI48JM4x0x7Aq67IIx4CEVc8vx2 IErcIFxwAKzVCY07xG64k0F24lc7CjxVAaw2AFwI0_JF0_Jw1l42xK82IYc2Ij64vIr41l 4I8I3I0E4IkC6x0Yz7v_Jr0_Gr1lx2IqxVAqx4xG67AKxVWUJVWUGwC20s026x8GjcxK67 AKxVWUGVWUWwC2zVAF1VAY17CE14v26r1q6r43MIIYrxkI7VAKI48JMIIF0xvE2Ix0cI8I cVAFwI0_JFI_Gr1lIxAIcVC0I7IYx2IY6xkF7I0E14v26r4j6F4UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Convert all IO on the sync path to use folios. Rename page-related identifiers to match folio. Retain some now-unnecessary while and for loops to minimize code changes, clean them up in a subsequent patch. 
Signed-off-by: Li Nan --- drivers/md/md.c | 2 +- drivers/md/raid1-10.c | 60 ++++-------- drivers/md/raid1.c | 173 +++++++++++++++------------------ drivers/md/raid10.c | 219 +++++++++++++++++++----------------------- 4 files changed, 196 insertions(+), 258 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index b8c8a16cf037..5b42b157263d 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -9450,7 +9450,7 @@ static bool sync_io_within_limit(struct mddev *mddev) { /* * For raid456, sync IO is stripe(4k) per IO, for other levels, it's - * RESYNC_PAGES(64k) per IO. + * RESYNC_BLOCK_SIZE(64k) per IO. */ return atomic_read(&mddev->recovery_active) < (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev); diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 260d7fd7ccbe..300fbe9dc02e 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -25,9 +25,9 @@ #define MAX_PLUG_BIO 32 =20 /* for managing resync I/O pages */ -struct resync_pages { +struct resync_folio { void *raid_bio; - struct page *pages[RESYNC_PAGES]; + struct folio *folio; }; =20 struct raid1_plug_cb { @@ -41,77 +41,55 @@ static void rbio_pool_free(void *rbio, void *data) kfree(rbio); } =20 -static inline int resync_alloc_pages(struct resync_pages *rp, +static inline int resync_alloc_folio(struct resync_folio *rf, gfp_t gfp_flags) { - int i; - - for (i =3D 0; i < RESYNC_PAGES; i++) { - rp->pages[i] =3D alloc_page(gfp_flags); - if (!rp->pages[i]) - goto out_free; - } + rf->folio =3D folio_alloc(gfp_flags, get_order(RESYNC_BLOCK_SIZE)); + if (!rf->folio) + return -ENOMEM; =20 return 0; - -out_free: - while (--i >=3D 0) - put_page(rp->pages[i]); - return -ENOMEM; } =20 -static inline void resync_free_pages(struct resync_pages *rp) +static inline void resync_free_folio(struct resync_folio *rf) { - int i; - - for (i =3D 0; i < RESYNC_PAGES; i++) - put_page(rp->pages[i]); + folio_put(rf->folio); } =20 -static inline void resync_get_all_pages(struct resync_pages *rp) +static inline 
void resync_get_folio(struct resync_folio *rf) { - int i; - - for (i =3D 0; i < RESYNC_PAGES; i++) - get_page(rp->pages[i]); + folio_get(rf->folio); } =20 -static inline struct page *resync_fetch_page(struct resync_pages *rp, - unsigned idx) +static inline struct folio *resync_fetch_folio(struct resync_folio *rf) { - if (WARN_ON_ONCE(idx >=3D RESYNC_PAGES)) - return NULL; - return rp->pages[idx]; + return rf->folio; } =20 /* - * 'strct resync_pages' stores actual pages used for doing the resync + * 'strct resync_folio' stores actual pages used for doing the resync * IO, and it is per-bio, so make .bi_private points to it. */ -static inline struct resync_pages *get_resync_pages(struct bio *bio) +static inline struct resync_folio *get_resync_folio(struct bio *bio) { return bio->bi_private; } =20 /* generally called after bio_reset() for reseting bvec */ -static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages= *rp, +static void md_bio_reset_resync_folio(struct bio *bio, struct resync_folio= *rf, int size) { - int idx =3D 0; - /* initialize bvec table again */ do { - struct page *page =3D resync_fetch_page(rp, idx); - int len =3D min_t(int, size, PAGE_SIZE); + struct folio *folio =3D resync_fetch_folio(rf); + int len =3D min_t(int, size, RESYNC_BLOCK_SIZE); =20 - if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); return; } - - size -=3D len; - } while (idx++ < RESYNC_PAGES && size > 0); + } while (0); } =20 =20 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 43453f1a04f4..d9c106529289 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -120,11 +120,11 @@ static void remove_serial(struct md_rdev *rdev, secto= r_t lo, sector_t hi) =20 /* * for resync bio, r1bio pointer can be retrieved from the per-bio - * 'struct resync_pages'. + * 'struct resync_folio'. 
*/ static inline struct r1bio *get_resync_r1bio(struct bio *bio) { - return get_resync_pages(bio)->raid_bio; + return get_resync_folio(bio)->raid_bio; } =20 static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf) @@ -146,70 +146,69 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void = *data) struct r1conf *conf =3D data; struct r1bio *r1_bio; struct bio *bio; - int need_pages; + int need_folio; int j; - struct resync_pages *rps; + struct resync_folio *rfs; =20 r1_bio =3D r1bio_pool_alloc(gfp_flags, conf); if (!r1_bio) return NULL; =20 - rps =3D kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages), + rfs =3D kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_folio), gfp_flags); - if (!rps) + if (!rfs) goto out_free_r1bio; =20 /* * Allocate bios : 1 for reading, n-1 for writing */ for (j =3D conf->raid_disks * 2; j-- ; ) { - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r1_bio->bios[j] =3D bio; } /* - * Allocate RESYNC_PAGES data pages and attach them to - * the first bio. + * Allocate data folio and attach it to the first bio. * If this is a user-requested check/repair, allocate - * RESYNC_PAGES for each bio. + * folio for each bio. 
*/ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery)) - need_pages =3D conf->raid_disks * 2; + need_folio =3D conf->raid_disks * 2; else - need_pages =3D 1; + need_folio =3D 1; for (j =3D 0; j < conf->raid_disks * 2; j++) { - struct resync_pages *rp =3D &rps[j]; + struct resync_folio *rf =3D &rfs[j]; =20 bio =3D r1_bio->bios[j]; =20 - if (j < need_pages) { - if (resync_alloc_pages(rp, gfp_flags)) - goto out_free_pages; + if (j < need_folio) { + if (resync_alloc_folio(rf, gfp_flags)) + goto out_free_folio; } else { - memcpy(rp, &rps[0], sizeof(*rp)); - resync_get_all_pages(rp); + memcpy(rf, &rfs[0], sizeof(*rf)); + resync_get_folio(rf); } =20 - rp->raid_bio =3D r1_bio; - bio->bi_private =3D rp; + rf->raid_bio =3D r1_bio; + bio->bi_private =3D rf; } =20 r1_bio->master_bio =3D NULL; =20 return r1_bio; =20 -out_free_pages: +out_free_folio: while (--j >=3D 0) - resync_free_pages(&rps[j]); + resync_free_folio(&rfs[j]); =20 out_free_bio: while (++j < conf->raid_disks * 2) { bio_uninit(r1_bio->bios[j]); kfree(r1_bio->bios[j]); } - kfree(rps); + kfree(rfs); =20 out_free_r1bio: rbio_pool_free(r1_bio, data); @@ -221,17 +220,17 @@ static void r1buf_pool_free(void *__r1_bio, void *dat= a) struct r1conf *conf =3D data; int i; struct r1bio *r1bio =3D __r1_bio; - struct resync_pages *rp =3D NULL; + struct resync_folio *rf =3D NULL; =20 for (i =3D conf->raid_disks * 2; i--; ) { - rp =3D get_resync_pages(r1bio->bios[i]); - resync_free_pages(rp); + rf =3D get_resync_folio(r1bio->bios[i]); + resync_free_folio(rf); bio_uninit(r1bio->bios[i]); kfree(r1bio->bios[i]); } =20 - /* resync pages array stored in the 1st bio's .bi_private */ - kfree(rp); + /* resync folio stored in the 1st bio's .bi_private */ + kfree(rf); =20 rbio_pool_free(r1bio, data); } @@ -2095,10 +2094,10 @@ static void end_sync_write(struct bio *bio) put_sync_write_buf(r1_bio); } =20 -static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, - int sectors, struct page *page, blk_opf_t rw) +static int 
r1_sync_folio_io(struct md_rdev *rdev, sector_t sector, int sec= tors, + int off, struct folio *folio, blk_opf_t rw) { - if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + if (sync_folio_io(rdev, sector, sectors << 9, off, folio, rw, false)) /* success */ return 1; if (rw =3D=3D REQ_OP_WRITE) { @@ -2129,10 +2128,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio) struct mddev *mddev =3D r1_bio->mddev; struct r1conf *conf =3D mddev->private; struct bio *bio =3D r1_bio->bios[r1_bio->read_disk]; - struct page **pages =3D get_resync_pages(bio)->pages; + struct folio *folio =3D get_resync_folio(bio)->folio; sector_t sect =3D r1_bio->sector; int sectors =3D r1_bio->sectors; - int idx =3D 0; + int off =3D 0; struct md_rdev *rdev; =20 rdev =3D conf->mirrors[r1_bio->read_disk].rdev; @@ -2162,9 +2161,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio) * active, and resync is currently active */ rdev =3D conf->mirrors[d].rdev; - if (sync_page_io(rdev, sect, s<<9, - pages[idx], - REQ_OP_READ, false)) { + if (sync_folio_io(rdev, sect, s<<9, off, folio, + REQ_OP_READ, false)) { success =3D 1; break; } @@ -2197,7 +2195,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) /* Try next page */ sectors -=3D s; sect +=3D s; - idx++; + off +=3D s << 9; continue; } =20 @@ -2210,8 +2208,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (r1_bio->bios[d]->bi_end_io !=3D end_sync_read) continue; rdev =3D conf->mirrors[d].rdev; - if (r1_sync_page_io(rdev, sect, s, - pages[idx], + if (r1_sync_folio_io(rdev, sect, s, off, folio, REQ_OP_WRITE) =3D=3D 0) { r1_bio->bios[d]->bi_end_io =3D NULL; rdev_dec_pending(rdev, mddev); @@ -2225,14 +2222,13 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (r1_bio->bios[d]->bi_end_io !=3D end_sync_read) continue; rdev =3D conf->mirrors[d].rdev; - if (r1_sync_page_io(rdev, sect, s, - pages[idx], + if (r1_sync_folio_io(rdev, sect, s, off, folio, REQ_OP_READ) !=3D 0) atomic_add(s, &rdev->corrected_errors); } 
sectors -=3D s; sect +=3D s; - idx ++; + off +=3D s << 9; } set_bit(R1BIO_Uptodate, &r1_bio->state); bio->bi_status =3D 0; @@ -2252,14 +2248,12 @@ static void process_checks(struct r1bio *r1_bio) struct r1conf *conf =3D mddev->private; int primary; int i; - int vcnt; =20 /* Fix variable parts of all bios */ - vcnt =3D (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); for (i =3D 0; i < conf->raid_disks * 2; i++) { blk_status_t status; struct bio *b =3D r1_bio->bios[i]; - struct resync_pages *rp =3D get_resync_pages(b); + struct resync_folio *rf =3D get_resync_folio(b); if (b->bi_end_io !=3D end_sync_read) continue; /* fixup the bio for reuse, but preserve errno */ @@ -2269,11 +2263,11 @@ static void process_checks(struct r1bio *r1_bio) b->bi_iter.bi_sector =3D r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_end_io =3D end_sync_read; - rp->raid_bio =3D r1_bio; - b->bi_private =3D rp; + rf->raid_bio =3D r1_bio; + b->bi_private =3D rf; =20 /* initialize bvec table again */ - md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); + md_bio_reset_resync_folio(b, rf, r1_bio->sectors << 9); } for (primary =3D 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io =3D=3D end_sync_read && @@ -2284,44 +2278,39 @@ static void process_checks(struct r1bio *r1_bio) } r1_bio->read_disk =3D primary; for (i =3D 0; i < conf->raid_disks * 2; i++) { - int j =3D 0; struct bio *pbio =3D r1_bio->bios[primary]; struct bio *sbio =3D r1_bio->bios[i]; blk_status_t status =3D sbio->bi_status; - struct page **ppages =3D get_resync_pages(pbio)->pages; - struct page **spages =3D get_resync_pages(sbio)->pages; - struct bio_vec *bi; - int page_len[RESYNC_PAGES] =3D { 0 }; - struct bvec_iter_all iter_all; + struct folio *pfolio =3D get_resync_folio(pbio)->folio; + struct folio *sfolio =3D get_resync_folio(sbio)->folio; =20 if (sbio->bi_end_io !=3D end_sync_read) continue; /* Now we can 'fixup' the error value */ sbio->bi_status =3D 0; =20 - 
bio_for_each_segment_all(bi, sbio, iter_all) - page_len[j++] =3D bi->bv_len; - - if (!status) { - for (j =3D vcnt; j-- ; ) { - if (memcmp(page_address(ppages[j]), - page_address(spages[j]), - page_len[j])) - break; - } - } else - j =3D 0; - if (j >=3D 0) + /* + * Copy data and submit write in two cases: + * - IO error (non-zero status) + * - Data inconsistency and not a CHECK operation. + */ + if (status) { atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); - if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) - && !status)) { - /* No need to write to this device. */ - sbio->bi_end_io =3D NULL; - rdev_dec_pending(conf->mirrors[i].rdev, mddev); + bio_copy_data(sbio, pbio); continue; + } else if (memcmp(folio_address(pfolio), + folio_address(sfolio), + r1_bio->sectors << 9)) { + atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); + if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + bio_copy_data(sbio, pbio); + continue; + } } =20 - bio_copy_data(sbio, pbio); + /* No need to write to this device. 
*/ + sbio->bi_end_io =3D NULL; + rdev_dec_pending(conf->mirrors[i].rdev, mddev); } } =20 @@ -2446,9 +2435,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) if (rdev && !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - r1_sync_page_io(rdev, sect, s, - folio_page(conf->tmpfolio, 0), - REQ_OP_WRITE); + r1_sync_folio_io(rdev, sect, s, 0, + conf->tmpfolio, REQ_OP_WRITE); rdev_dec_pending(rdev, mddev); } } @@ -2461,9 +2449,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) if (rdev && !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - if (r1_sync_page_io(rdev, sect, s, - folio_page(conf->tmpfolio, 0), - REQ_OP_READ)) { + if (r1_sync_folio_io(rdev, sect, s, 0, + conf->tmpfolio, REQ_OP_READ)) { atomic_add(s, &rdev->corrected_errors); pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg= )\n", mdname(mddev), s, @@ -2759,15 +2746,15 @@ static int init_resync(struct r1conf *conf) static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf) { struct r1bio *r1bio =3D mempool_alloc(&conf->r1buf_pool, GFP_NOIO); - struct resync_pages *rps; + struct resync_folio *rfs; struct bio *bio; int i; =20 for (i =3D conf->raid_disks * 2; i--; ) { bio =3D r1bio->bios[i]; - rps =3D bio->bi_private; + rfs =3D bio->bi_private; bio_reset(bio, NULL, 0); - bio->bi_private =3D rps; + bio->bi_private =3D rfs; } r1bio->master_bio =3D NULL; return r1bio; @@ -2799,7 +2786,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, int good_sectors =3D RESYNC_SECTORS; int min_bad =3D 0; /* number of sectors that are bad in all devices */ int idx =3D sector_to_idx(sector_nr); - int page_idx =3D 0; =20 if (!mempool_initialized(&conf->r1buf_pool)) if (init_resync(conf)) @@ -3003,8 +2989,8 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, nr_sectors =3D 0; sync_blocks =3D 0; do { - struct page *page; - int len =3D PAGE_SIZE; + struct folio *folio; 
+ int len =3D RESYNC_BLOCK_SIZE; if (sector_nr + (len>>9) > max_sector) len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) @@ -3020,24 +3006,19 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, } =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { - struct resync_pages *rp; + struct resync_folio *rf; =20 bio =3D r1_bio->bios[i]; - rp =3D get_resync_pages(bio); + rf =3D get_resync_folio(bio); if (bio->bi_end_io) { - page =3D resync_fetch_page(rp, page_idx); - - /* - * won't fail because the vec table is big - * enough to hold all these pages - */ - __bio_add_page(bio, page, len, 0); + folio =3D resync_fetch_folio(rf); + bio_add_folio_nofail(bio, folio, len, 0); } } nr_sectors +=3D len>>9; sector_nr +=3D len>>9; sync_blocks -=3D (len>>9); - } while (++page_idx < RESYNC_PAGES); + } while (0); =20 r1_bio->sectors =3D nr_sectors; =20 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 09238dc9cde6..7533aeb23819 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -96,11 +96,11 @@ static void end_reshape(struct r10conf *conf); =20 /* * for resync bio, r10bio pointer can be retrieved from the per-bio - * 'struct resync_pages'. + * 'struct resync_folio'. 
*/ static inline struct r10bio *get_resync_r10bio(struct bio *bio) { - return get_resync_pages(bio)->raid_bio; + return get_resync_folio(bio)->raid_bio; } =20 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -133,8 +133,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) struct r10bio *r10_bio; struct bio *bio; int j; - int nalloc, nalloc_rp; - struct resync_pages *rps; + int nalloc, nalloc_rf; + struct resync_folio *rfs; =20 r10_bio =3D r10bio_pool_alloc(gfp_flags, conf); if (!r10_bio) @@ -148,66 +148,65 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void= *data) =20 /* allocate once for all bios */ if (!conf->have_replacement) - nalloc_rp =3D nalloc; + nalloc_rf =3D nalloc; else - nalloc_rp =3D nalloc * 2; - rps =3D kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags); - if (!rps) + nalloc_rf =3D nalloc * 2; + rfs =3D kmalloc_array(nalloc_rf, sizeof(struct resync_folio), gfp_flags); + if (!rfs) goto out_free_r10bio; =20 /* * Allocate bios. */ for (j =3D nalloc ; j-- ; ) { - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r10_bio->devs[j].bio =3D bio; if (!conf->have_replacement) continue; - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r10_bio->devs[j].repl_bio =3D bio; } /* - * Allocate RESYNC_PAGES data pages and attach them - * where needed. + * Allocate data folio and attach it where needed. 
*/ for (j =3D 0; j < nalloc; j++) { struct bio *rbio =3D r10_bio->devs[j].repl_bio; - struct resync_pages *rp, *rp_repl; + struct resync_folio *rf, *rf_repl; =20 - rp =3D &rps[j]; + rf =3D &rfs[j]; if (rbio) - rp_repl =3D &rps[nalloc + j]; + rf_repl =3D &rfs[nalloc + j]; =20 bio =3D r10_bio->devs[j].bio; =20 if (!j || test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) { - if (resync_alloc_pages(rp, gfp_flags)) - goto out_free_pages; + if (resync_alloc_folio(rf, gfp_flags)) + goto out_free_folio; } else { - memcpy(rp, &rps[0], sizeof(*rp)); - resync_get_all_pages(rp); + memcpy(rf, &rfs[0], sizeof(*rf)); + resync_get_folio(rf); } =20 - rp->raid_bio =3D r10_bio; - bio->bi_private =3D rp; + rf->raid_bio =3D r10_bio; + bio->bi_private =3D rf; if (rbio) { - memcpy(rp_repl, rp, sizeof(*rp)); - rbio->bi_private =3D rp_repl; + memcpy(rf_repl, rf, sizeof(*rf)); + rbio->bi_private =3D rf_repl; } } =20 return r10_bio; =20 -out_free_pages: +out_free_folio: while (--j >=3D 0) - resync_free_pages(&rps[j]); + resync_free_folio(&rfs[j]); =20 j =3D 0; out_free_bio: @@ -219,7 +218,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) bio_uninit(r10_bio->devs[j].repl_bio); kfree(r10_bio->devs[j].repl_bio); } - kfree(rps); + kfree(rfs); out_free_r10bio: rbio_pool_free(r10_bio, conf); return NULL; @@ -230,14 +229,14 @@ static void r10buf_pool_free(void *__r10_bio, void *d= ata) struct r10conf *conf =3D data; struct r10bio *r10bio =3D __r10_bio; int j; - struct resync_pages *rp =3D NULL; + struct resync_folio *rf =3D NULL; =20 for (j =3D conf->copies; j--; ) { struct bio *bio =3D r10bio->devs[j].bio; =20 if (bio) { - rp =3D get_resync_pages(bio); - resync_free_pages(rp); + rf =3D get_resync_folio(bio); + resync_free_folio(rf); bio_uninit(bio); kfree(bio); } @@ -250,7 +249,7 @@ static void r10buf_pool_free(void *__r10_bio, void *dat= a) } =20 /* resync pages array stored in the 1st bio's .bi_private */ - kfree(rp); + kfree(rf); =20 rbio_pool_free(r10bio, conf); } @@ -2342,8 
+2341,7 @@ static void sync_request_write(struct mddev *mddev, s= truct r10bio *r10_bio) struct r10conf *conf =3D mddev->private; int i, first; struct bio *tbio, *fbio; - int vcnt; - struct page **tpages, **fpages; + struct folio *tfolio, *ffolio; =20 atomic_set(&r10_bio->remaining, 1); =20 @@ -2359,14 +2357,13 @@ static void sync_request_write(struct mddev *mddev,= struct r10bio *r10_bio) fbio =3D r10_bio->devs[i].bio; fbio->bi_iter.bi_size =3D r10_bio->sectors << 9; fbio->bi_iter.bi_idx =3D 0; - fpages =3D get_resync_pages(fbio)->pages; + ffolio =3D get_resync_folio(fbio)->folio; =20 - vcnt =3D (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); /* now find blocks with errors */ for (i=3D0 ; i < conf->copies ; i++) { - int j, d; + int d; struct md_rdev *rdev; - struct resync_pages *rp; + struct resync_folio *rf; =20 tbio =3D r10_bio->devs[i].bio; =20 @@ -2375,31 +2372,23 @@ static void sync_request_write(struct mddev *mddev,= struct r10bio *r10_bio) if (i =3D=3D first) continue; =20 - tpages =3D get_resync_pages(tbio)->pages; + tfolio =3D get_resync_folio(tbio)->folio; d =3D r10_bio->devs[i].devnum; rdev =3D conf->mirrors[d].rdev; if (!r10_bio->devs[i].bio->bi_status) { /* We know that the bi_io_vec layout is the same for * both 'first' and 'i', so we just compare them. - * All vec entries are PAGE_SIZE; */ - int sectors =3D r10_bio->sectors; - for (j =3D 0; j < vcnt; j++) { - int len =3D PAGE_SIZE; - if (sectors < (len / 512)) - len =3D sectors * 512; - if (memcmp(page_address(fpages[j]), - page_address(tpages[j]), - len)) - break; - sectors -=3D len/512; + if (memcmp(folio_address(ffolio), + folio_address(tfolio), + r10_bio->sectors << 9)) { + atomic64_add(r10_bio->sectors, + &mddev->resync_mismatches); + if (test_bit(MD_RECOVERY_CHECK, + &mddev->recovery)) + /* Don't fix anything. 
*/ + continue; } - if (j =3D=3D vcnt) - continue; - atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); - if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) - /* Don't fix anything. */ - continue; } else if (test_bit(FailFast, &rdev->flags)) { /* Just give up on this device */ md_error(rdev->mddev, rdev); @@ -2410,13 +2399,13 @@ static void sync_request_write(struct mddev *mddev,= struct r10bio *r10_bio) * First we need to fixup bv_offset, bv_len and * bi_vecs, as the read request might have corrupted these */ - rp =3D get_resync_pages(tbio); + rf =3D get_resync_folio(tbio); bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE); =20 - md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); + md_bio_reset_resync_folio(tbio, rf, fbio->bi_iter.bi_size); =20 - rp->raid_bio =3D r10_bio; - tbio->bi_private =3D rp; + rf->raid_bio =3D r10_bio; + tbio->bi_private =3D rf; tbio->bi_iter.bi_sector =3D r10_bio->devs[i].addr; tbio->bi_end_io =3D end_sync_write; =20 @@ -2476,10 +2465,9 @@ static void fix_recovery_read_error(struct r10bio *r= 10_bio) struct bio *bio =3D r10_bio->devs[0].bio; sector_t sect =3D 0; int sectors =3D r10_bio->sectors; - int idx =3D 0; int dr =3D r10_bio->devs[0].devnum; int dw =3D r10_bio->devs[1].devnum; - struct page **pages =3D get_resync_pages(bio)->pages; + struct folio *folio =3D get_resync_folio(bio)->folio; =20 while (sectors) { int s =3D sectors; @@ -2492,19 +2480,21 @@ static void fix_recovery_read_error(struct r10bio *= r10_bio) =20 rdev =3D conf->mirrors[dr].rdev; addr =3D r10_bio->devs[0].addr + sect; - ok =3D sync_page_io(rdev, - addr, - s << 9, - pages[idx], - REQ_OP_READ, false); + ok =3D sync_folio_io(rdev, + addr, + s << 9, + sect << 9, + folio, + REQ_OP_READ, false); if (ok) { rdev =3D conf->mirrors[dw].rdev; addr =3D r10_bio->devs[1].addr + sect; - ok =3D sync_page_io(rdev, - addr, - s << 9, - pages[idx], - REQ_OP_WRITE, false); + ok =3D sync_folio_io(rdev, + addr, + s << 9, + sect << 9, + folio, + REQ_OP_WRITE, 
false); if (!ok) { set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, @@ -2539,7 +2529,6 @@ static void fix_recovery_read_error(struct r10bio *r1= 0_bio) =20 sectors -=3D s; sect +=3D s; - idx++; } } =20 @@ -3068,7 +3057,7 @@ static int init_resync(struct r10conf *conf) static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf) { struct r10bio *r10bio =3D mempool_alloc(&conf->r10buf_pool, GFP_NOIO); - struct rsync_pages *rp; + struct resync_folio *rf; struct bio *bio; int nalloc; int i; @@ -3081,14 +3070,14 @@ static struct r10bio *raid10_alloc_init_r10buf(stru= ct r10conf *conf) =20 for (i =3D 0; i < nalloc; i++) { bio =3D r10bio->devs[i].bio; - rp =3D bio->bi_private; + rf =3D bio->bi_private; bio_reset(bio, NULL, 0); - bio->bi_private =3D rp; + bio->bi_private =3D rf; bio =3D r10bio->devs[i].repl_bio; if (bio) { - rp =3D bio->bi_private; + rf =3D bio->bi_private; bio_reset(bio, NULL, 0); - bio->bi_private =3D rp; + bio->bi_private =3D rf; } } return r10bio; @@ -3174,7 +3163,6 @@ static sector_t raid10_sync_request(struct mddev *mdd= ev, sector_t sector_nr, int max_sync =3D RESYNC_SECTORS; sector_t sync_blocks; sector_t chunk_mask =3D conf->geo.chunk_mask; - int page_idx =3D 0; =20 /* * Allow skipping a full rebuild for incremental assembly @@ -3642,25 +3630,26 @@ static sector_t raid10_sync_request(struct mddev *m= ddev, sector_t sector_nr, if (sector_nr + max_sync < max_sector) max_sector =3D sector_nr + max_sync; do { - struct page *page; - int len =3D PAGE_SIZE; + int len =3D RESYNC_BLOCK_SIZE; + if (sector_nr + (len>>9) > max_sector) len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) break; for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) { - struct resync_pages *rp =3D get_resync_pages(bio); - page =3D resync_fetch_page(rp, page_idx); - if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + struct resync_folio *rf =3D get_resync_folio(bio); + struct folio *folio =3D resync_fetch_folio(rf); + + if 
(WARN_ON(!bio_add_folio(bio, folio, len, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); *skipped =3D 1; - return max_sync; + return len; } } nr_sectors +=3D len>>9; sector_nr +=3D len>>9; - } while (++page_idx < RESYNC_PAGES); + } while (0); r10_bio->sectors =3D nr_sectors; =20 if (mddev_is_clustered(mddev) && @@ -4578,7 +4567,7 @@ static sector_t reshape_request(struct mddev *mddev, = sector_t sector_nr, int *skipped) { /* We simply copy at most one chunk (smallest of old and new) - * at a time, possibly less if that exceeds RESYNC_PAGES, + * at a time, possibly less if that exceeds RESYNC_BLOCK_SIZE, * or we hit a bad block or something. * This might mean we pause for normal IO in the middle of * a chunk, but that is not a problem as mddev->reshape_position @@ -4618,14 +4607,13 @@ static sector_t reshape_request(struct mddev *mddev= , sector_t sector_nr, struct r10bio *r10_bio; sector_t next, safe, last; int max_sectors; - int nr_sectors; int s; struct md_rdev *rdev; int need_flush =3D 0; struct bio *blist; struct bio *bio, *read_bio; int sectors_done =3D 0; - struct page **pages; + struct folio *folio; =20 if (sector_nr =3D=3D 0) { /* If restarting in the middle, skip the initial sectors */ @@ -4741,7 +4729,7 @@ static sector_t reshape_request(struct mddev *mddev, = sector_t sector_nr, return sectors_done; } =20 - read_bio =3D bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ, + read_bio =3D bio_alloc_bioset(rdev->bdev, 1, REQ_OP_READ, GFP_KERNEL, &mddev->bio_set); read_bio->bi_iter.bi_sector =3D (r10_bio->devs[r10_bio->read_slot].addr + rdev->data_offset); @@ -4805,32 +4793,23 @@ static sector_t reshape_request(struct mddev *mddev= , sector_t sector_nr, blist =3D b; } =20 - /* Now add as many pages as possible to all of these bios. */ + /* Now add folio to all of these bios. 
*/ =20 - nr_sectors =3D 0; - pages =3D get_resync_pages(r10_bio->devs[0].bio)->pages; - for (s =3D 0 ; s < max_sectors; s +=3D PAGE_SIZE >> 9) { - struct page *page =3D pages[s / (PAGE_SIZE >> 9)]; - int len =3D (max_sectors - s) << 9; - if (len > PAGE_SIZE) - len =3D PAGE_SIZE; - for (bio =3D blist; bio ; bio =3D bio->bi_next) { - if (WARN_ON(!bio_add_page(bio, page, len, 0))) { - bio->bi_status =3D BLK_STS_RESOURCE; - bio_endio(bio); - return sectors_done; - } + folio =3D get_resync_folio(r10_bio->devs[0].bio)->folio; + for (bio =3D blist; bio ; bio =3D bio->bi_next) { + if (WARN_ON(!bio_add_folio(bio, folio, max_sectors, 0))) { + bio->bi_status =3D BLK_STS_RESOURCE; + bio_endio(bio); + return sectors_done; } - sector_nr +=3D len >> 9; - nr_sectors +=3D len >> 9; } - r10_bio->sectors =3D nr_sectors; + r10_bio->sectors =3D max_sectors >> 9; =20 /* Now submit the read */ atomic_inc(&r10_bio->remaining); read_bio->bi_next =3D NULL; submit_bio_noacct(read_bio); - sectors_done +=3D nr_sectors; + sectors_done +=3D max_sectors; if (sector_nr <=3D last) goto read_more; =20 @@ -4932,8 +4911,8 @@ static int handle_reshape_read_error(struct mddev *md= dev, struct r10conf *conf =3D mddev->private; struct r10bio *r10b; int slot =3D 0; - int idx =3D 0; - struct page **pages; + int sect =3D 0; + struct folio *folio; =20 r10b =3D kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); if (!r10b) { @@ -4941,8 +4920,8 @@ static int handle_reshape_read_error(struct mddev *md= dev, return -ENOMEM; } =20 - /* reshape IOs share pages from .devs[0].bio */ - pages =3D get_resync_pages(r10_bio->devs[0].bio)->pages; + /* reshape IOs share folio from .devs[0].bio */ + folio =3D get_resync_folio(r10_bio->devs[0].bio)->folio; =20 r10b->sector =3D r10_bio->sector; __raid10_find_phys(&conf->prev, r10b); @@ -4958,19 +4937,19 @@ static int handle_reshape_read_error(struct mddev *= mddev, while (!success) { int d =3D r10b->devs[slot].devnum; struct md_rdev *rdev =3D conf->mirrors[d].rdev; - 
sector_t addr; if (rdev =3D=3D NULL || test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) goto failed; =20 - addr =3D r10b->devs[slot].addr + idx * PAGE_SIZE; atomic_inc(&rdev->nr_pending); - success =3D sync_page_io(rdev, - addr, - s << 9, - pages[idx], - REQ_OP_READ, false); + success =3D sync_folio_io(rdev, + r10b->devs[slot].addr + + sect, + s << 9, + sect << 9, + folio, + REQ_OP_READ, false); rdev_dec_pending(rdev, mddev); if (success) break; @@ -4989,7 +4968,7 @@ static int handle_reshape_read_error(struct mddev *md= dev, return -EIO; } sectors -=3D s; - idx++; + sect +=3D s; } kfree(r10b); return 0; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 962B533AD98; Wed, 28 Jan 2026 08:06:21 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587586; cv=none; b=hyB1csmBH03oHv2fZvHvDXUOKqsI2JrL39YI3v5kr42XVRdyeAxYm3Qq+HYt1abKdVCRE0JL8fjKK5vCnqtLcuqNGjpPx0oBfadViknnoTbvSHaJDOKMNohdmC0ftVqqPmTkqVFFVDLd85U0GhcsGrBW1T/31GqiFaNR8PAWtIs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587586; c=relaxed/simple; bh=t3zzn+TK20wic0tjff/CfXNfXyZZYq/ohxmBT7DuvLg=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=p+UsBkJnR5VrRLCWFe6mFnpdgrvm6HfN4sH2PLvrBr+93TsiXzzNVcyV7eHz1XcQ54rgwRy2JllK8oP2Ky9LaHejmleXhpBT8UxOYJIcsUzG7yLREofzWmb0JTVm+5gkWWo7DpG/oAL29P7J51t40lzCHXyz4lzrkS14TN1USP0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none 
(p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4f1FG226j9zKHMl1; Wed, 28 Jan 2026 16:06:02 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 7A23F4056E; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S10; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 06/14] md: Clean up folio sync support related code Date: Wed, 28 Jan 2026 15:57:00 +0800 Message-Id: <20260128075708.2259525-7-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S10 X-Coremail-Antispam: 1UD129KBjvJXoWxAF4rJr18CF1rXrW8XFWxCrg_yoW5uFy8pa 9rGrySv3yrKF48ZF4Dtw4UAayFk34Yga4UCFWfua97uF13ZFyDKF4jqa48Jr1DAF95Ca4F qF93Ja1UXa15tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 
3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan 1. Remove resync_get_all_folio() and invoke folio_get() directly instead. 2. Clean up redundant while(0) loop in md_bio_reset_resync_folio(). 3. Clean up bio variable by directly referencing r10_bio->devs[j].bio instead in r1buf_pool_alloc() and r10buf_pool_alloc(). 4. Clean up RESYNC_PAGES. 
Signed-off-by: Li Nan Reviewed-by: Xiao Ni --- drivers/md/raid1-10.c | 22 ++++++---------------- drivers/md/raid1.c | 6 ++---- drivers/md/raid10.c | 6 ++---- 3 files changed, 10 insertions(+), 24 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 300fbe9dc02e..568ab002691f 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Maximum size of each resync request */ #define RESYNC_BLOCK_SIZE (64*1024) -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) =20 /* @@ -56,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio= *rf) folio_put(rf->folio); } =20 -static inline void resync_get_folio(struct resync_folio *rf) -{ - folio_get(rf->folio); -} - static inline struct folio *resync_fetch_folio(struct resync_folio *rf) { return rf->folio; @@ -80,16 +74,12 @@ static void md_bio_reset_resync_folio(struct bio *bio, = struct resync_folio *rf, int size) { /* initialize bvec table again */ - do { - struct folio *folio =3D resync_fetch_folio(rf); - int len =3D min_t(int, size, RESYNC_BLOCK_SIZE); - - if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { - bio->bi_status =3D BLK_STS_RESOURCE; - bio_endio(bio); - return; - } - } while (0); + if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf), + min_t(int, size, RESYNC_BLOCK_SIZE), + 0))) { + bio->bi_status =3D BLK_STS_RESOURCE; + bio_endio(bio); + } } =20 =20 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d9c106529289..5954ead7dfd4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -181,18 +181,16 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void = *data) for (j =3D 0; j < conf->raid_disks * 2; j++) { struct resync_folio *rf =3D &rfs[j]; =20 - bio =3D r1_bio->bios[j]; - if (j < need_folio) { if (resync_alloc_folio(rf, gfp_flags)) goto out_free_folio; } else { memcpy(rf, &rfs[0], sizeof(*rf)); - resync_get_folio(rf); + 
folio_get(rf->folio); } =20 rf->raid_bio =3D r1_bio; - bio->bi_private =3D rf; + r1_bio->bios[j]->bi_private =3D rf; } =20 r1_bio->master_bio =3D NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7533aeb23819..5c0975ec8809 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -183,19 +183,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void= *data) if (rbio) rf_repl =3D &rfs[nalloc + j]; =20 - bio =3D r10_bio->devs[j].bio; - if (!j || test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) { if (resync_alloc_folio(rf, gfp_flags)) goto out_free_folio; } else { memcpy(rf, &rfs[0], sizeof(*rf)); - resync_get_folio(rf); + folio_get(rf->folio); } =20 rf->raid_bio =3D r10_bio; - bio->bi_private =3D rf; + r10_bio->devs[j].bio->bi_private =3D rf; if (rbio) { memcpy(rf_repl, rf, sizeof(*rf)); rbio->bi_private =3D rf_repl; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 963B633AD9C; Wed, 28 Jan 2026 08:06:21 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587584; cv=none; b=OMguzaoetfq8Gc3NrH7pxlPUUZtNFKNnW+7Qy1x2jQM1ONhZYbhwaOXPppg+z4u7dckNl673pPQMPQiTkr+z9m4lBzGqcwkvhxTgio8DyzKA8GlAsopxlS/tKGSE0n//mbmcqaToAGYH2OWewXruoAVs2+G0QpUyhf6KchPBUdU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587584; c=relaxed/simple; bh=hyh5xxHW3TbkiRsSqhyv/vZFDOimcfRQ0MJJkiNayeo=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=qxvQfsJzxfb00r8R0CxI/wJmmgHtxNa4HzfiWYa48gJSnAgAx43MxMybuaAYCORo17yCSsK6ImZtZWC4cmb2zdtjiR3uRMqGH9a6FpmM78+X6SlO/4lF09xE36iXv5SsNYQxc50L1I+kkGQUxYdPxAuA8Kuo+hnEUTCusqmnIIU= ARC-Authentication-Results: i=1; 
smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4f1FG22WXHzKHMl1; Wed, 28 Jan 2026 16:06:02 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 87C1D40539; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S11; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 07/14] md/raid1: clean up useless sync_blocks handling in raid1_sync_request Date: Wed, 28 Jan 2026 15:57:01 +0800 Message-Id: <20260128075708.2259525-8-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S11 X-Coremail-Antispam: 1UD129KBjvJXoW7CF43Ar43JFy7Cr4xAr1fWFg_yoW8AF17pa 17JryagryrWFW5Z3ZxAr1UCFyFkFy7trWUJFWfW3s7WFZ7Gr97CF4UXF1agFyqqa4aqrW5 X3s5AF45CFy3tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI 
kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Since the loop is changed to while(0), some handling of sync_blocks in raid1_sync_request() is no longer needed and can be removed. No functional changes. 
Signed-off-by: Li Nan --- drivers/md/raid1.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5954ead7dfd4..2308e16b1280 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2985,7 +2985,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, if (max_sector > sector_nr + good_sectors) max_sector =3D sector_nr + good_sectors; nr_sectors =3D 0; - sync_blocks =3D 0; do { struct folio *folio; int len =3D RESYNC_BLOCK_SIZE; @@ -2993,15 +2992,13 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) break; - if (sync_blocks =3D=3D 0) { - if (!md_bitmap_start_sync(mddev, sector_nr, - &sync_blocks, still_degraded) && - !conf->fullsync && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) - break; - if ((len >> 9) > sync_blocks) - len =3D sync_blocks<<9; - } + if (!md_bitmap_start_sync(mddev, sector_nr, + &sync_blocks, still_degraded) && + !conf->fullsync && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + break; + if ((len >> 9) > sync_blocks) + len =3D sync_blocks<<9; =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { struct resync_folio *rf; @@ -3015,7 +3012,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, } nr_sectors +=3D len>>9; sector_nr +=3D len>>9; - sync_blocks -=3D (len>>9); } while (0); =20 r1_bio->sectors =3D nr_sectors; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 5116E3382FD; Wed, 28 Jan 2026 08:06:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; 
t=1769587584; cv=none; b=b6tStkieRRNM4xmpdjV9AXUIWX6AVgja1gg2bEghfXcZKYdYoqtNBTq58+umcitiCmXYzFDraEajrJtSSaFlmVqv+PFZ3YWpUgh9AXerNJnzHnxrO5nY76JJTbK9NXvPY/xPh5Cih8jRTcrjv8MeN9yKfZj8NAFBA9FzrpqnJeQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587584; c=relaxed/simple; bh=P7q+SzrHydbwifHaFnCM6yPStt0p95CAPZ/FR56oykk=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=a4RRepPogvCVzNcswO5tBD6MGValDDUzk7+Gj9+KMf9honBGIq2RhajMQTEXWxSFlO0cPM/iVUgz+09OORPz6hI+NP+nUAfvOVTOGbBG2itYFKqQTZIwy2kE1FrvQ/QXnyLXHDekpucS52ed4Go2JgQGLKwFR6H97AHm4GpQNRY= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4f1FG23D32zKHMlK; Wed, 28 Jan 2026 16:06:02 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 9FB394056E; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S12; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 08/14] md/raid1: fix IO error at logical block size granularity Date: Wed, 28 Jan 2026 15:57:02 +0800 Message-Id: <20260128075708.2259525-9-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: 
<20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S12 X-Coremail-Antispam: 1UD129KBjvJXoW7ur15uFW3ZFy5ArW8WF4fAFb_yoW8Zr1kpa 13GrZYvr4UGrWjyw4DAryqy3WFkaySyFWUGrs5W39F9FyDZ3sIgFyUKFWYgFy0kr9ayayU Wwnrtr48C3W7tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan RAID1 currently fixes IO error at PAGE_SIZE granularity. Fix at smaller granularity can handle more errors, and RAID will support logical block sizes larger than PAGE_SIZE in the future, where PAGE_SIZE IO will fail. Switch IO error fix granularity to logical block size. 
Signed-off-by: Li Nan Reviewed-by: Yu Kuai --- drivers/md/raid1.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 2308e16b1280..27e3b2375b16 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2114,7 +2114,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) { /* Try some synchronous reads of other devices to get * good data, much like with normal read errors. Only - * read into the pages we already have so we don't + * read into the block we already have so we don't * need to re-issue the read request. * We don't need to freeze the array, because being in an * active sync request, there is no normal IO, and @@ -2145,13 +2145,11 @@ static int fix_sync_read_error(struct r1bio *r1_bio) } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int d =3D r1_bio->read_disk; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; do { if (r1_bio->bios[d]->bi_end_io =3D=3D end_sync_read) { /* No rcu protection needed here devices @@ -2190,7 +2188,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (abort) return 0; =20 - /* Try next page */ + /* Try next block */ sectors -=3D s; sect +=3D s; off +=3D s << 9; @@ -2388,14 +2386,11 @@ static void fix_read_error(struct r1conf *conf, str= uct r1bio *r1_bio) } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int d =3D read_disk; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - do { rdev =3D conf->mirrors[d].rdev; if (rdev && --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AE7703382FA; Wed, 28 Jan 2026 08:06:16 +0000 
(UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; cv=none; b=QIRfXzRjix5+Mc3lte4pBmjKmxMi5a2AiDR1fw36FGeP60iNl9P5IhkXhRype5+ozToi3I7BgNTBua8Opx/uabdBaKoIolyBqaVMTWOi4L8aR6NW7QxiGqnP1raUOms3ki+Yx1yT2JfPKpYetp1NvRrC/FaPWZEFQvuOX2Mp9oA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587580; c=relaxed/simple; bh=8NC2ehBTY8Bn/LphTnL1rwWQpfZKABThW9nZ4iBCfns=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=lsI/1H8zZC0sXmZQJRbilhx4qgeSH1VCt5tigWtsPWMGnGx59kd2j5eF2PnnAoO0EhD5NkmArpzR/54PuxvUkzeSYVrzNOG45VfvwdVxxU75GiKinfr4uEwB+uJzQsDd2lBZV83DroG8cK/84rgeclgUkI3TMRDy3UaoetlclcM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.177]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY5CPtzYQv1X; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id AAE6040593; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S13; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 09/14] md/raid10: fix IO error at logical block size granularity Date: Wed, 28 Jan 2026 15:57:03 +0800 Message-Id: 
<20260128075708.2259525-10-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S13 X-Coremail-Antispam: 1UD129KBjvJXoW7Wry7KrWfWw4DGF43AryDtrb_yoW5Jr1xpa 9IkF1a9rWDGa1UZrnrAFWDX3WFk3y5tFWUtry8Gw4IgF98tryDKF45XFWYgry5CFWfZw10 gr1DKr4xAa4kJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan RAID10 currently fixes IO error at PAGE_SIZE granularity. Fix at smaller granularity can handle more errors, and RAID will support logical block sizes larger than PAGE_SIZE in the future, where PAGE_SIZE IO will fail. Switch IO error fix granularity to logical block size. 
Signed-off-by: Li Nan Reviewed-by: Yu Kuai --- drivers/md/raid10.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5c0975ec8809..06257eea97ed 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2452,7 +2452,7 @@ static void sync_request_write(struct mddev *mddev, s= truct r10bio *r10_bio) static void fix_recovery_read_error(struct r10bio *r10_bio) { /* We got a read error during recovery. - * We repeat the read in smaller page-sized sections. + * We repeat the read in smaller logical_block_sized sections. * If a read succeeds, write it to the new device or record * a bad block if we cannot. * If a read fails, record a bad block on both old and @@ -2468,14 +2468,11 @@ static void fix_recovery_read_error(struct r10bio *= r10_bio) struct folio *folio =3D get_resync_folio(bio)->folio; =20 while (sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); struct md_rdev *rdev; sector_t addr; int ok; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - rdev =3D conf->mirrors[dr].rdev; addr =3D r10_bio->devs[0].addr + sect; ok =3D sync_folio_io(rdev, @@ -2619,14 +2616,11 @@ static void fix_read_error(struct r10conf *conf, st= ruct mddev *mddev, struct r10 } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int sl =3D slot; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - do { d =3D r10_bio->devs[sl].devnum; rdev =3D conf->mirrors[d].rdev; @@ -4925,16 +4919,14 @@ static int handle_reshape_read_error(struct mddev *= mddev, __raid10_find_phys(&conf->prev, r10b); =20 while (sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int success =3D 0; int first_slot =3D slot; =20 - if (s > (PAGE_SIZE >> 9)) - s =3D PAGE_SIZE >> 9; - while (!success) { int d =3D r10b->devs[slot].devnum; struct md_rdev *rdev =3D 
conf->mirrors[d].rdev; + if (rdev =3D=3D NULL || test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 139193382DA; Wed, 28 Jan 2026 08:06:21 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; cv=none; b=qJrduDmc7VOor4pU9Hqyif1Bm1wy0tc7dRfsHycvRa2EgwGkJf71rJIStBoMvnJO1RW5K0xrc0eQXm4k5qHDahQYlq/y9lAtWisJ1a65jbCqpvzqDsdf5V9Ke3cbdsjEw92WureuWuF2Tr89ngm3fKQ/mISFmcuxubt94ICjHTw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; c=relaxed/simple; bh=BUB2U5HlQfzITHvCbVqC9FXEMMg9pMLz3dr2kFJbIuk=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=IWiAJKh9jL6EgAy4Cr9rT2JYY2NV99D89a2nKnACmTxsoG7JqcWA0xnQK4pyEl7OG1yCWKG9xpsxoWaiEhb3eVd4/L8uch15HctXMUrqhVgD71IHtUHFXqmBckz3DxQx/1Pi/mrwahssECnwiQWIamtGxasO76r5DKphFi9A3h8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFY5kWjzYQv1K; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id B95E340539; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by 
APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S14; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 10/14] md/raid1,raid10: clean up resync_fetch_folio Date: Wed, 28 Jan 2026 15:57:04 +0800 Message-Id: <20260128075708.2259525-11-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S14 X-Coremail-Antispam: 1UD129KBjvJXoWxCrWDtw4xXw4xJrW5JryDGFg_yoW5WF47pa nFgrZxXw48Kay8Aws8ZF48C3WFka43tryjyF4xW3s7uFyfXFyqgF4UXay8WF98XF98K34F qa48t3y5Z3WrJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 
8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVW8JVWxJwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan The helper resync_fetch_folio() only returns the folio member without any additional logic. Clean it up by accessing rf->folio directly. Signed-off-by: Li Nan --- drivers/md/raid1-10.c | 7 +------ drivers/md/raid1.c | 10 ++++------ drivers/md/raid10.c | 3 +-- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 568ab002691f..2ff1f8855900 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -55,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio= *rf) folio_put(rf->folio); } =20 -static inline struct folio *resync_fetch_folio(struct resync_folio *rf) -{ - return rf->folio; -} - /* * 'strct resync_folio' stores actual pages used for doing the resync * IO, and it is per-bio, so make .bi_private points to it. 
@@ -74,7 +69,7 @@ static void md_bio_reset_resync_folio(struct bio *bio, st= ruct resync_folio *rf, int size) { /* initialize bvec table again */ - if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf), + if (WARN_ON(!bio_add_folio(bio, rf->folio, min_t(int, size, RESYNC_BLOCK_SIZE), 0))) { bio->bi_status =3D BLK_STS_RESOURCE; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 27e3b2375b16..a303349eeff4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2981,8 +2981,8 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, max_sector =3D sector_nr + good_sectors; nr_sectors =3D 0; do { - struct folio *folio; int len =3D RESYNC_BLOCK_SIZE; + if (sector_nr + (len>>9) > max_sector) len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) @@ -2996,13 +2996,11 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, len =3D sync_blocks<<9; =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { - struct resync_folio *rf; - bio =3D r1_bio->bios[i]; - rf =3D get_resync_folio(bio); if (bio->bi_end_io) { - folio =3D resync_fetch_folio(rf); - bio_add_folio_nofail(bio, folio, len, 0); + struct resync_folio *rf =3D get_resync_folio(bio); + + bio_add_folio_nofail(bio, rf->folio, len, 0); } } nr_sectors +=3D len>>9; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 06257eea97ed..d8a5fadfc933 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3630,9 +3630,8 @@ static sector_t raid10_sync_request(struct mddev *mdd= ev, sector_t sector_nr, break; for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) { struct resync_folio *rf =3D get_resync_folio(bio); - struct folio *folio =3D resync_fetch_folio(rf); =20 - if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { + if (WARN_ON(!bio_add_folio(bio, rf->folio, len, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); *skipped =3D 1; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com 
[45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 50A463382D2; Wed, 28 Jan 2026 08:06:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; cv=none; b=u37LqDhl80ORV9Zr43NK0rLUxbsyd/Q404vy0L84LeP6riWKpHaMHFNShcXFDLoR5IlTaK+jO2r7ndzx8Iym8o82gXjJjPHBJA0HGVyUAeooKlMuAX0LlPDmo5AOGhPpl1PNvDIqvDIEL+PzqKrhwU6rfpkfeel641N+CzVKNWs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; c=relaxed/simple; bh=v1ca4ji6icjqN01yAKdlL3ILFAmiqg1Q9Weya/OoKH0=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=b+k4TdP90ea9CNOgT93WCj3Y9mocUq25ARTk3dV7Y+uphAsear0BLJnhHM21JmsHgVkIWuFjm7zqQxNW19CWWpexstFVsgBzw5/7Wnl0nh2VOW8GZvyqVslshsjdTHr6wUP4Uqt43tJVijyBooXQ/LqVElHe6aerTAfgHHIhtmM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFZ0kQrzYQv2R; Wed, 28 Jan 2026 16:05:37 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id D040E4056B; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S15; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, 
linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 11/14] md: clean up resync_free_folio Date: Wed, 28 Jan 2026 15:57:05 +0800 Message-Id: <20260128075708.2259525-12-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S15 X-Coremail-Antispam: 1UD129KBjvJXoW7tFW5uFyxCrW3WF18KFykKrg_yoW8uF17pa n8Wr9Ivw48GFW8AF4DZF4UZFy5Cay7K3yjkFWxuws3ZFy3ZFyDWa1UJa4UKr4DXrn8Ga4I qF98GrW3XF1rJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVW8JVWxJwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan The resync_free_folio() helper only wraps a single folio_put() call, so remove 
it and call folio_put() directly. Signed-off-by: Li Nan --- drivers/md/raid1-10.c | 5 ----- drivers/md/raid1.c | 4 ++-- drivers/md/raid10.c | 4 ++-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 2ff1f8855900..ffbd7bd0f6e8 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -50,11 +50,6 @@ static inline int resync_alloc_folio(struct resync_folio= *rf, return 0; } =20 -static inline void resync_free_folio(struct resync_folio *rf) -{ - folio_put(rf->folio); -} - /* * 'strct resync_folio' stores actual pages used for doing the resync * IO, and it is per-bio, so make .bi_private points to it. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a303349eeff4..b5239b5cb4e9 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -199,7 +199,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *d= ata) =20 out_free_folio: while (--j >=3D 0) - resync_free_folio(&rfs[j]); + folio_put(rfs[j].folio); =20 out_free_bio: while (++j < conf->raid_disks * 2) { @@ -222,7 +222,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) =20 for (i =3D conf->raid_disks * 2; i--; ) { rf =3D get_resync_folio(r1bio->bios[i]); - resync_free_folio(rf); + folio_put(rf->folio); bio_uninit(r1bio->bios[i]); kfree(r1bio->bios[i]); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d8a5fadfc933..b728131bdad4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -204,7 +204,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) =20 out_free_folio: while (--j >=3D 0) - resync_free_folio(&rfs[j]); + folio_put(rfs[j].folio); =20 j =3D 0; out_free_bio: @@ -234,7 +234,7 @@ static void r10buf_pool_free(void *__r10_bio, void *dat= a) =20 if (bio) { rf =3D get_resync_folio(bio); - resync_free_folio(rf); + folio_put(rf->folio); bio_uninit(bio); kfree(bio); } --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com 
[45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 18EA333E35C; Wed, 28 Jan 2026 08:06:25 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587588; cv=none; b=m1CAnW0/1bmX69VEutM0ccS4ZgUUm4oIru4VdDFwcTOxQotmwSHIJMVCYWlVMjB/X5d9Htjqbqdo9rnzRLz+hFlJssOi+/bHIvc27ApaMvFyCbdm9SKfV3OclpD3hVZ51vqQVSkjqf6sr6IK+P+FrN54NgQ9ssYQhtnEcHoagLM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587588; c=relaxed/simple; bh=OQzhPQQuk4PYy9Nvn1MQer5EX0ZLWqiPDUVNKKCVVXw=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=obhPygLdHxXrWQ/DmGZObEW1aJHy9MS9q2YSu4o8gsX2skBfgYOk1U1+lNLAIftVaA0x3PosyefDoJtPls1LF9sQYObcP+PkGMuaTZ1W4LmHk0NjRiSJ2wNFm5DcnWtMq7uim9J8cTTNnSPM+G3I0hEiIT0Td2R/e1X70G8u3A8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.177]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4f1FG25Cd9zKHMlN; Wed, 28 Jan 2026 16:06:02 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id E509240593; Wed, 28 Jan 2026 16:06:14 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S16; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, 
linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 12/14] md/raid1: clean up sync IO size calculation in raid1_sync_request Date: Wed, 28 Jan 2026 15:57:06 +0800 Message-Id: <20260128075708.2259525-13-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S16 X-Coremail-Antispam: 1UD129KBjvJXoW7Cw1DAry7WFWUKr45tw1rXrb_yoW8WF1rpw nxGr93W3y8JF45Zw13Jw1UCF1FkFy5KrWUJF4SgwnrWFn7Gr9rCF48XF1YgFyDua4ftrWr X34kAr45A3WDJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Use 'nr_sectors' directly for sync IO 
size calculation. Prepare folio allocation failure fallback. No functional changes. Signed-off-by: Li Nan --- drivers/md/raid1.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b5239b5cb4e9..2253e65c5f03 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2979,32 +2979,27 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, max_sector =3D mddev->resync_max; /* Don't do IO beyond here */ if (max_sector > sector_nr + good_sectors) max_sector =3D sector_nr + good_sectors; - nr_sectors =3D 0; do { - int len =3D RESYNC_BLOCK_SIZE; - - if (sector_nr + (len>>9) > max_sector) - len =3D (max_sector - sector_nr) << 9; - if (len =3D=3D 0) + nr_sectors =3D max_sector - sector_nr; + if (nr_sectors =3D=3D 0) break; if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) break; - if ((len >> 9) > sync_blocks) - len =3D sync_blocks<<9; + if (nr_sectors > sync_blocks) + nr_sectors =3D sync_blocks; =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { bio =3D r1_bio->bios[i]; if (bio->bi_end_io) { struct resync_folio *rf =3D get_resync_folio(bio); =20 - bio_add_folio_nofail(bio, rf->folio, len, 0); + bio_add_folio_nofail(bio, rf->folio, nr_sectors << 9, 0); } } - nr_sectors +=3D len>>9; - sector_nr +=3D len>>9; + sector_nr +=3D nr_sectors; } while (0); =20 r1_bio->sectors =3D nr_sectors; --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 94D1733B6E7; Wed, 28 Jan 2026 08:06:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; 
t=1769587585; cv=none; b=RWjh25bM9+3BaJtpW0bMZwy5UsyPy4MpLgNnE6nMeAsXU+a6vxL6k8KtonNpDh0OULz6Toso8gjlKQQtHfZ5dmmT4cqpIIwiLKtRFgDZq8wDFKIcKzeaHnmEuicr+qlmLxIncJZhD5xS0mBeetlo7w0lJbynQFBupB6XgcUS0x0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; c=relaxed/simple; bh=yfudnzEzQRr7C5O99A5U/bgzRdWaEJ6Vw+8Jo3UY7Xo=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=Vv9SlVEjkyvDU5CNH0LI7F8WRdsoz2TEmQ3zsaXcwD4F3zMXmLlvY0TrDCi8Cc9/CwhlOix04jiNIV3xju7kvp5vyT/uIQ8ENT6uyfUlOdKwLBnp83J3vhdLgj9kMc8fA1Menvk+J5G+hJsEH3zPbGsrmgB7rm+IWs5qChs0+pk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFZ0RQmzYQv1K; Wed, 28 Jan 2026 16:05:38 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 02AE74056E; Wed, 28 Jan 2026 16:06:15 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S17; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 13/14] md/raid10: clean up sync IO size calculation in raid10_sync_request Date: Wed, 28 Jan 2026 15:57:07 +0800 Message-Id: <20260128075708.2259525-14-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> 
References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S17 X-Coremail-Antispam: 1UD129KBjvJXoW7Cw1DAry7WF4fCF47Jw18Xwb_yoW8Gr4fpF 1DGr97W3y8ta18Zws8Ja1UC3WFyas5trWjyrWxW3ZxGF1fur9Fka18XF1FgFyDWa43GryY q34vyr4Yy3Z7JF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Use 'nr_sectors' directly for sync IO size calculation. Prepare folio allocation failure fallback. No functional changes. 
Signed-off-by: Li Nan --- drivers/md/raid10.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b728131bdad4..030812f908ac 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3618,28 +3618,24 @@ static sector_t raid10_sync_request(struct mddev *m= ddev, sector_t sector_nr, } } =20 - nr_sectors =3D 0; if (sector_nr + max_sync < max_sector) max_sector =3D sector_nr + max_sync; do { - int len =3D RESYNC_BLOCK_SIZE; + nr_sectors =3D max_sector - sector_nr; =20 - if (sector_nr + (len>>9) > max_sector) - len =3D (max_sector - sector_nr) << 9; - if (len =3D=3D 0) + if (nr_sectors =3D=3D 0) break; for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) { struct resync_folio *rf =3D get_resync_folio(bio); =20 - if (WARN_ON(!bio_add_folio(bio, rf->folio, len, 0))) { + if (WARN_ON(!bio_add_folio(bio, rf->folio, nr_sectors << 9, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); *skipped =3D 1; - return len; + return nr_sectors << 9; } } - nr_sectors +=3D len>>9; - sector_nr +=3D len>>9; + sector_nr +=3D nr_sectors; } while (0); r10_bio->sectors =3D nr_sectors; =20 --=20 2.39.2 From nobody Sun Feb 8 22:59:32 2026 Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 50AFD3382E6; Wed, 28 Jan 2026 08:06:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; cv=none; b=TyLVsWDu3KZi2GMws5sTwYJ0epb6q3wDWvdvCSUxRLAjr2RUtxu+UP3vEGiCJVyKiG3ZxAhRzkBwSYcyEekANtoJUEcKZxj1oPLFGIN+dV4WoPJlLtR9E00hBMO+cQAa+YNvBUy/xoJ80K8gFfGDBFDaufKWxpJk2GoGzrqlXxc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1769587585; c=relaxed/simple;
bh=cMAZKlzIKPR3uGJ6ezn7Dt5blppwOj18zFERVH8Yrl8=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=Ufu6BvFOowYftn2h3yjuXMq9LwQW3I2he8329vERbXpWvCAfzMyDDbz1qRRLuRe4MtbU/kmYbcZY5xCpzyDFwDxtsKehmSDowthg0/0Lcf77dnJ5GiGNTba4YhXuMrp3JQAj/ChtePCnou3ekDMZpQZp2sBU+FonQk2P+tea6mI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4f1FFZ0v2ZzYQv2R; Wed, 28 Jan 2026 16:05:38 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 1338640573; Wed, 28 Jan 2026 16:06:15 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgBnFvd1w3lpPgyWFQ--.43207S18; Wed, 28 Jan 2026 16:06:14 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: xni@redhat.com, linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH v2 14/14] md/raid1,raid10: fall back to smaller order if sync folio alloc fails Date: Wed, 28 Jan 2026 15:57:08 +0800 Message-Id: <20260128075708.2259525-15-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20260128075708.2259525-1-linan666@huaweicloud.com> References: <20260128075708.2259525-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgBnFvd1w3lpPgyWFQ--.43207S18 X-Coremail-Antispam: 
1UD129KBjvJXoW3XFykWr4xGFyxtw45ur1xXwb_yoW7CrWUpa 1UGrySv34rtFW3X3yfJr1DuF1Fk34fWayUAFnrWwn7u3WfWryDuF4UXay5WF1DZFn8AFy2 q3WDAr45uFs5JaUanT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan RESYNC_BLOCK_SIZE (64K) has higher allocation failure chance than 4k, so retry with lower orders to improve allocation reliability. A r1/10_bio may have different rf->folio orders. Use minimum order as r1/10_bio sectors to prevent exceeding size when adding folio to IO later. 
Signed-off-by: Li Nan --- drivers/md/raid1-10.c | 14 +++++++++++--- drivers/md/raid1.c | 13 +++++++++---- drivers/md/raid10.c | 28 ++++++++++++++++++++++++++-- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index ffbd7bd0f6e8..e966d11a81e7 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -41,12 +41,20 @@ static void rbio_pool_free(void *rbio, void *data) } =20 static inline int resync_alloc_folio(struct resync_folio *rf, - gfp_t gfp_flags) + gfp_t gfp_flags, int *order) { - rf->folio =3D folio_alloc(gfp_flags, get_order(RESYNC_BLOCK_SIZE)); - if (!rf->folio) + struct folio *folio; + + do { + folio =3D folio_alloc(gfp_flags, *order); + if (folio) + break; + } while (--(*order) >=3D 0); + + if (!folio) return -ENOMEM; =20 + rf->folio =3D folio; return 0; } =20 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 2253e65c5f03..5bee846f1534 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -149,6 +149,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *d= ata) int need_folio; int j; struct resync_folio *rfs; + int order =3D get_order(RESYNC_BLOCK_SIZE); =20 r1_bio =3D r1bio_pool_alloc(gfp_flags, conf); if (!r1_bio) @@ -182,7 +183,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *d= ata) struct resync_folio *rf =3D &rfs[j]; =20 if (j < need_folio) { - if (resync_alloc_folio(rf, gfp_flags)) + if (resync_alloc_folio(rf, gfp_flags, &order)) goto out_free_folio; } else { memcpy(rf, &rfs[0], sizeof(*rf)); @@ -193,6 +194,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *d= ata) r1_bio->bios[j]->bi_private =3D rf; } =20 + r1_bio->sectors =3D 1 << (order + PAGE_SECTORS_SHIFT); r1_bio->master_bio =3D NULL; =20 return r1_bio; @@ -2776,7 +2778,7 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, int write_targets =3D 0, read_targets =3D 0; sector_t sync_blocks; bool still_degraded =3D false; - int good_sectors =3D RESYNC_SECTORS; + int
good_sectors; int min_bad =3D 0; /* number of sectors that are bad in all devices */ int idx =3D sector_to_idx(sector_nr); =20 @@ -2858,8 +2860,11 @@ static sector_t raid1_sync_request(struct mddev *mdd= ev, sector_t sector_nr, r1_bio->sector =3D sector_nr; r1_bio->state =3D 0; set_bit(R1BIO_IsSync, &r1_bio->state); - /* make sure good_sectors won't go across barrier unit boundary */ - good_sectors =3D align_to_barrier_unit_end(sector_nr, good_sectors); + /* + * make sure good_sectors won't go across barrier unit boundary. + * r1_bio->sectors <=3D RESYNC_SECTORS. + */ + good_sectors =3D align_to_barrier_unit_end(sector_nr, r1_bio->sectors); =20 for (i =3D 0; i < conf->raid_disks * 2; i++) { struct md_rdev *rdev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 030812f908ac..72c77db9957c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -135,6 +135,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) int j; int nalloc, nalloc_rf; struct resync_folio *rfs; + int order =3D get_order(RESYNC_BLOCK_SIZE); =20 r10_bio =3D r10bio_pool_alloc(gfp_flags, conf); if (!r10_bio) @@ -185,7 +186,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) =20 if (!j || test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) { - if (resync_alloc_folio(rf, gfp_flags)) + if (resync_alloc_folio(rf, gfp_flags, &order)) goto out_free_folio; } else { memcpy(rf, &rfs[0], sizeof(*rf)); @@ -200,6 +201,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) } } =20 + r10_bio->sectors =3D 1 << (order + PAGE_SECTORS_SHIFT); return r10_bio; =20 out_free_folio: @@ -3374,6 +3376,15 @@ static sector_t raid10_sync_request(struct mddev *md= dev, sector_t sector_nr, continue; } } + + /* + * RESYNC_BLOCK_SIZE folio might alloc failed in + * resync_alloc_folio(). Fall back to smaller sync + * size if needed. 
+ */ + if (max_sync > r10_bio->sectors) + max_sync =3D r10_bio->sectors; + any_working =3D 1; bio =3D r10_bio->devs[0].bio; bio->bi_next =3D biolist; @@ -3525,7 +3536,15 @@ static sector_t raid10_sync_request(struct mddev *md= dev, sector_t sector_nr, } if (sync_blocks < max_sync) max_sync =3D sync_blocks; + r10_bio =3D raid10_alloc_init_r10buf(conf); + /* + * RESYNC_BLOCK_SIZE folio might alloc failed in resync_alloc_folio(). + * Fall back to smaller sync size if needed. + */ + if (max_sync > r10_bio->sectors) + max_sync =3D r10_bio->sectors; + r10_bio->state =3D 0; =20 r10_bio->mddev =3D mddev; @@ -4702,7 +4721,12 @@ static sector_t reshape_request(struct mddev *mddev,= sector_t sector_nr, r10_bio->mddev =3D mddev; r10_bio->sector =3D sector_nr; set_bit(R10BIO_IsReshape, &r10_bio->state); - r10_bio->sectors =3D last - sector_nr + 1; + /* + * RESYNC_BLOCK_SIZE folio might alloc failed in + * resync_alloc_folio(). Fall back to smaller sync + * size if needed. + */ + r10_bio->sectors =3D min_t(int, r10_bio->sectors, last - sector_nr + 1); rdev =3D read_balance(conf, r10_bio, &max_sectors); BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); =20 --=20 2.39.2