From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 01/15] md/raid1,raid10: clean up of RESYNC_SECTORS
Date: Wed, 17 Dec 2025 19:59:59 +0800
Message-Id: <20251217120013.2616531-2-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Move the redundant RESYNC_SECTORS definitions out of the raid1 and
raid10 implementations and into the shared raid1-10.c. Simplify the
max_sync assignment in raid10_sync_request(). No functional changes.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
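A sanity check of the macro arithmetic for reviewers (a user-space
sketch, not part of the patch; the PAGE_SIZE/PAGE_SHIFT values of
4096/12 are assumptions): the shared RESYNC_SECTORS definition yields
the same 128-sector request size that the deleted
"RESYNC_PAGES << (PAGE_SHIFT-9)" expression computed.

	#include <assert.h>

	#define PAGE_SIZE 4096				/* assumption: 4 KiB pages */
	#define PAGE_SHIFT 12				/* assumption */
	#define RESYNC_BLOCK_SIZE (64*1024)
	#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
	#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)	/* 512-byte sectors */

	int main(void)
	{
		assert(RESYNC_PAGES == 16);	/* 64 KiB is 16 pages */
		assert(RESYNC_SECTORS == 128);	/* 64 KiB is 128 sectors */
		/* the expression this patch deletes computed the same value */
		assert((RESYNC_PAGES << (PAGE_SHIFT - 9)) == RESYNC_SECTORS);
		return 0;
	}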
 drivers/md/raid1-10.c | 1 +
 drivers/md/raid1.c    | 1 -
 drivers/md/raid10.c   | 4 +---
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 521625756128..260d7fd7ccbe 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -2,6 +2,7 @@
 /* Maximum size of each resync request */
 #define RESYNC_BLOCK_SIZE (64*1024)
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 
 /*
  * Number of guaranteed raid bios in case of extreme VM load:
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 00120c86c443..407925951299 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -136,7 +136,6 @@ static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf)
 }
 
 #define RESYNC_DEPTH 32
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
 #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1adad768e277..1e57d9ce98e7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -113,7 +113,6 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 	return kzalloc(size, gfp_flags);
 }
 
-#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
 /* amount of memory to reserve for resync requests */
 #define RESYNC_WINDOW (1024*1024)
 /* maximum number of concurrent requests, memory permitting */
@@ -3171,7 +3170,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	struct bio *biolist = NULL, *bio;
 	sector_t nr_sectors;
 	int i;
-	int max_sync;
+	int max_sync = RESYNC_SECTORS;
 	sector_t sync_blocks;
 	sector_t chunk_mask = conf->geo.chunk_mask;
 	int page_idx = 0;
@@ -3284,7 +3283,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	 * end_sync_write if we will want to write.
 	 */
 
-	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
 	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 		/* recovery... the complicated one */
 		int j;
-- 
2.39.2

From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 02/15] md: introduce sync_folio_io for folio support in RAID
Date: Wed, 17 Dec 2025 20:00:00 +0800
Message-Id: <20251217120013.2616531-3-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Prepare for folio support in RAID by introducing sync_folio_io(),
matching sync_page_io()'s functionality. The differences are:
 - the input parameter 'page' is replaced with 'folio'
 - __bio_add_page() calls are replaced with bio_add_folio_nofail()
 - a new parameter 'off' prepares for adding a folio to a bio in
   segments, e.g. in fix_recovery_read_error()

sync_page_io() will be removed once full folio support is complete.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
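A usage sketch for reviewers (illustrative, not part of the patch):
reading one sector of metadata into the middle of a folio with the new
helper. The read_one_sector() wrapper and its parameters are
hypothetical; only sync_folio_io(), folio_alloc() and folio_put() are
real interfaces here.

	/* Hypothetical caller, for illustration only. */
	static int read_one_sector(struct md_rdev *rdev, sector_t sector)
	{
		struct folio *folio = folio_alloc(GFP_KERNEL, 0);
		int ret;

		if (!folio)
			return -ENOMEM;
		/* 512 bytes placed at byte offset 512 within the folio */
		ret = sync_folio_io(rdev, sector, 512, 512, folio,
				    REQ_OP_READ, true);
		folio_put(folio);
		return ret ? 0 : -EIO;	/* sync_folio_io() returns 1 on success */
	}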
 drivers/md/md.h |  2 ++
 drivers/md/md.c | 27 +++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/drivers/md/md.h b/drivers/md/md.h
index a083f37374d0..410f8a6b75e7 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -920,6 +920,8 @@ void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
 extern int md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 		struct page *page, blk_opf_t opf, bool metadata_op);
+extern int sync_folio_io(struct md_rdev *rdev, sector_t sector, int size,
+		int off, struct folio *folio, blk_opf_t opf, bool metadata_op);
 extern void md_do_sync(struct md_thread *thread);
 extern void md_new_event(void);
 extern void md_allow_write(struct mddev *mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cde84c9f05eb..9dfd6f8da5b8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1192,6 +1192,33 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 }
 EXPORT_SYMBOL_GPL(sync_page_io);
 
+int sync_folio_io(struct md_rdev *rdev, sector_t sector, int size, int off,
+		struct folio *folio, blk_opf_t opf, bool metadata_op)
+{
+	struct bio bio;
+	struct bio_vec bvec;
+
+	if (metadata_op && rdev->meta_bdev)
+		bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
+	else
+		bio_init(&bio, rdev->bdev, &bvec, 1, opf);
+
+	if (metadata_op)
+		bio.bi_iter.bi_sector = sector + rdev->sb_start;
+	else if (rdev->mddev->reshape_position != MaxSector &&
+		 (rdev->mddev->reshape_backwards ==
+		  (sector >= rdev->mddev->reshape_position)))
+		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
+	else
+		bio.bi_iter.bi_sector = sector + rdev->data_offset;
+	bio_add_folio_nofail(&bio, folio, size, off);
+
+	submit_bio_wait(&bio);
+
+	return !bio.bi_status;
+}
+EXPORT_SYMBOL_GPL(sync_folio_io);
+
 static int read_disk_sb(struct md_rdev *rdev, int size)
 {
 	if (rdev->sb_loaded)
-- 
2.39.2

From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 03/15] md: use folio for bb_folio
Date: Wed, 17 Dec 2025 20:00:01 +0800
Message-Id: <20251217120013.2616531-4-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Convert bb_page to bb_folio and use it throughout.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
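For context, a sketch of how the converted field is consumed (distilled
from the super_1_load() hunk below; load_bblog() and its parameters are
hypothetical, and the (sector << 10 | count) entry packing of the v1.x
bad-blocks log is stated here as an assumption for illustration):

	/* Hypothetical helper: read and decode the on-disk bad-blocks log. */
	static int load_bblog(struct md_rdev *rdev, sector_t bb_sector, int sectors)
	{
		__le64 *bbp;
		int i;

		if (!sync_folio_io(rdev, bb_sector, sectors << 9, 0,
				   rdev->bb_folio, REQ_OP_READ, true))
			return -EIO;
		bbp = (__le64 *)folio_address(rdev->bb_folio);
		for (i = 0; i < (sectors << (9 - 3)); i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);

			if (bb == 0)	/* end of log */
				break;
			/* assumed layout: high bits sector, low 10 bits count */
			badblocks_set(&rdev->badblocks, bb >> 10, bb & 0x3ff, true);
		}
		return 0;
	}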
 drivers/md/md.h |  3 ++-
 drivers/md/md.c | 25 +++++++++++++------------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/drivers/md/md.h b/drivers/md/md.h
index 410f8a6b75e7..aa6d9df50fd0 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -144,7 +144,8 @@ struct md_rdev {
 	struct block_device *bdev;	/* block device handle */
 	struct file *bdev_file;		/* Handle from open for bdev */
 
-	struct page *sb_page, *bb_page;
+	struct page *sb_page;
+	struct folio *bb_folio;
 	int sb_loaded;
 	__u64 sb_events;
 	sector_t data_offset;	/* start of data in array */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9dfd6f8da5b8..0732bbcdb95d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1073,9 +1073,9 @@ void md_rdev_clear(struct md_rdev *rdev)
 		rdev->sb_start = 0;
 		rdev->sectors = 0;
 	}
-	if (rdev->bb_page) {
-		put_page(rdev->bb_page);
-		rdev->bb_page = NULL;
+	if (rdev->bb_folio) {
+		folio_put(rdev->bb_folio);
+		rdev->bb_folio = NULL;
 	}
 	badblocks_exit(&rdev->badblocks);
 }
@@ -1909,9 +1909,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 
 	rdev->desc_nr = le32_to_cpu(sb->dev_number);
 
-	if (!rdev->bb_page) {
-		rdev->bb_page = alloc_page(GFP_KERNEL);
-		if (!rdev->bb_page)
+	if (!rdev->bb_folio) {
+		rdev->bb_folio = folio_alloc(GFP_KERNEL, 0);
+
+		if (!rdev->bb_folio)
 			return -ENOMEM;
 	}
 	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
@@ -1930,10 +1931,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		if (offset == 0)
 			return -EINVAL;
 		bb_sector = (long long)offset;
-		if (!sync_page_io(rdev, bb_sector, sectors << 9,
-				  rdev->bb_page, REQ_OP_READ, true))
+		if (!sync_folio_io(rdev, bb_sector, sectors << 9, 0,
+				   rdev->bb_folio, REQ_OP_READ, true))
 			return -EIO;
-		bbp = (__le64 *)page_address(rdev->bb_page);
+		bbp = (__le64 *)folio_address(rdev->bb_folio);
 		rdev->badblocks.shift = sb->bblog_shift;
 		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
 			u64 bb = le64_to_cpu(*bbp);
@@ -2300,7 +2301,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
 			md_error(mddev, rdev);
 		else {
 			struct badblocks *bb = &rdev->badblocks;
-			__le64 *bbp = (__le64 *)page_address(rdev->bb_page);
+			__le64 *bbp = (__le64 *)folio_address(rdev->bb_folio);
 			u64 *p = bb->page;
 			sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
 			if (bb->changed) {
@@ -2953,7 +2954,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
 			md_write_metadata(mddev, rdev,
 					  rdev->badblocks.sector,
 					  rdev->badblocks.size << 9,
-					  rdev->bb_page, 0);
+					  folio_page(rdev->bb_folio, 0), 0);
 			rdev->badblocks.size = 0;
 		}
 
@@ -3809,7 +3810,7 @@ int md_rdev_init(struct md_rdev *rdev)
 	rdev->sb_events = 0;
 	rdev->last_read_error = 0;
 	rdev->sb_loaded = 0;
-	rdev->bb_page = NULL;
+	rdev->bb_folio = NULL;
 	atomic_set(&rdev->nr_pending, 0);
 	atomic_set(&rdev->read_errors, 0);
 	atomic_set(&rdev->corrected_errors, 0);
-- 
2.39.2

From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 04/15] md/raid1: use folio for tmppage
Date: Wed, 17 Dec 2025 20:00:02 +0800
Message-Id: <20251217120013.2616531-5-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Convert tmppage to tmpfolio and use it throughout in raid1.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
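A note for reviewers: an order-0 folio is exactly one page, so the
tmppage -> tmpfolio switch leaves memory use unchanged. One behavioral
difference worth knowing is that folio_put(), unlike md's
safe_put_page(), has no NULL check. A minimal sketch (the put_tmpfolio()
helper is hypothetical, not from the patch):

	/* Illustration only: NULL-safe teardown of the temporary folio. */
	static void put_tmpfolio(struct r1conf *conf)
	{
		if (conf->tmpfolio) {		/* folio_put() is not NULL-safe */
			folio_put(conf->tmpfolio);
			conf->tmpfolio = NULL;
		}
	}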
 drivers/md/raid1.h |  2 +-
 drivers/md/raid1.c | 18 ++++++++++--------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c98d43a7ae99..d480b3a8c2c4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -101,7 +101,7 @@ struct r1conf {
 	/* temporary buffer to synchronous IO when attempting to repair
 	 * a read error.
 	 */
-	struct page *tmppage;
+	struct folio *tmpfolio;
 
 	/* When taking over an array from a different personality, we store
 	 * the new thread here until we fully activate the array.
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 407925951299..43453f1a04f4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2417,8 +2417,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			     rdev->recovery_offset >= sect + s)) &&
 			    rdev_has_badblock(rdev, sect, s) == 0) {
 				atomic_inc(&rdev->nr_pending);
-				if (sync_page_io(rdev, sect, s<<9,
-					 conf->tmppage, REQ_OP_READ, false))
+				if (sync_folio_io(rdev, sect, s<<9, 0,
+					 conf->tmpfolio, REQ_OP_READ, false))
 					success = 1;
 				rdev_dec_pending(rdev, mddev);
 				if (success)
@@ -2447,7 +2447,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				r1_sync_page_io(rdev, sect, s,
-						conf->tmppage, REQ_OP_WRITE);
+						folio_page(conf->tmpfolio, 0),
+						REQ_OP_WRITE);
 				rdev_dec_pending(rdev, mddev);
 			}
 		}
@@ -2461,7 +2462,8 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			    !test_bit(Faulty, &rdev->flags)) {
 				atomic_inc(&rdev->nr_pending);
 				if (r1_sync_page_io(rdev, sect, s,
-						    conf->tmppage, REQ_OP_READ)) {
+						    folio_page(conf->tmpfolio, 0),
+						    REQ_OP_READ)) {
 					atomic_add(s, &rdev->corrected_errors);
 					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
 						mdname(mddev), s,
@@ -3120,8 +3122,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (!conf->mirrors)
 		goto abort;
 
-	conf->tmppage = alloc_page(GFP_KERNEL);
-	if (!conf->tmppage)
+	conf->tmpfolio = folio_alloc(GFP_KERNEL, 0);
+	if (!conf->tmpfolio)
 		goto abort;
 
 	r1bio_size = offsetof(struct r1bio, bios[mddev->raid_disks * 2]);
@@ -3196,7 +3198,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 	if (conf) {
 		mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
-		safe_put_page(conf->tmppage);
+		folio_put(conf->tmpfolio);
 		kfree(conf->nr_pending);
 		kfree(conf->nr_waiting);
 		kfree(conf->nr_queued);
@@ -3310,7 +3312,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 
 	mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
-	safe_put_page(conf->tmppage);
+	folio_put(conf->tmpfolio);
 	kfree(conf->nr_pending);
 	kfree(conf->nr_waiting);
 	kfree(conf->nr_queued);
-- 
2.39.2

From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 05/15] md/raid10: use folio for tmppage
Date: Wed, 17 Dec 2025 20:00:03 +0800
Message-Id: <20251217120013.2616531-6-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Convert tmppage to tmpfolio and use it throughout in raid10.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
 drivers/md/raid10.h |  2 +-
 drivers/md/raid10.c | 37 +++++++++++++++++++------------------
 2 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index ec79d87fb92f..19f37439a4e2 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -89,7 +89,7 @@ struct r10conf {
 
 	mempool_t r10bio_pool;
 	mempool_t r10buf_pool;
-	struct page *tmppage;
+	struct folio *tmpfolio;
 	struct bio_set bio_split;
 
 	/* When taking over an array from a different personality, we store
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1e57d9ce98e7..09238dc9cde6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2581,13 +2581,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	}
 }
 
-static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-			    int sectors, struct page *page, enum req_op op)
+static int r10_sync_folio_io(struct md_rdev *rdev, sector_t sector,
+			     int sectors, struct folio *folio, enum req_op op)
 {
 	if (rdev_has_badblock(rdev, sector, sectors) &&
 	    (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
 		return -1;
-	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
+	if (sync_folio_io(rdev, sector, sectors << 9, 0, folio, op, false))
 		/* success */
 		return 1;
 	if (op == REQ_OP_WRITE) {
@@ -2650,12 +2650,13 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				    r10_bio->devs[sl].addr + sect,
 				    s) == 0) {
 				atomic_inc(&rdev->nr_pending);
-				success = sync_page_io(rdev,
-						       r10_bio->devs[sl].addr +
-						       sect,
-						       s<<9,
-						       conf->tmppage,
-						       REQ_OP_READ, false);
+				success = sync_folio_io(rdev,
+							r10_bio->devs[sl].addr +
+							sect,
+							s<<9,
+							0,
+							conf->tmpfolio,
+							REQ_OP_READ, false);
 				rdev_dec_pending(rdev, mddev);
 				if (success)
 					break;
@@ -2698,10 +2699,10 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				continue;
 
 			atomic_inc(&rdev->nr_pending);
-			if (r10_sync_page_io(rdev,
-					     r10_bio->devs[sl].addr +
-					     sect,
-					     s, conf->tmppage, REQ_OP_WRITE)
+			if (r10_sync_folio_io(rdev,
+					      r10_bio->devs[sl].addr +
+					      sect,
+					      s, conf->tmpfolio, REQ_OP_WRITE)
 			    == 0) {
 				/* Well, this device is dead */
 				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %pg)\n",
@@ -2730,10 +2731,10 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 				continue;
 
 			atomic_inc(&rdev->nr_pending);
-			switch (r10_sync_page_io(rdev,
+			switch (r10_sync_folio_io(rdev,
 					     r10_bio->devs[sl].addr + sect,
-					     s, conf->tmppage, REQ_OP_READ)) {
+					     s, conf->tmpfolio, REQ_OP_READ)) {
 			case 0:
 				/* Well, this device is dead */
 				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %pg)\n",
@@ -3841,7 +3842,7 @@ static void raid10_free_conf(struct r10conf *conf)
 	kfree(conf->mirrors);
 	kfree(conf->mirrors_old);
 	kfree(conf->mirrors_new);
-	safe_put_page(conf->tmppage);
+	folio_put(conf->tmpfolio);
 	bioset_exit(&conf->bio_split);
 	kfree(conf);
 }
@@ -3879,8 +3880,8 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	if (!conf->mirrors)
 		goto out;
 
-	conf->tmppage = alloc_page(GFP_KERNEL);
-	if (!conf->tmppage)
+	conf->tmpfolio = folio_alloc(GFP_KERNEL, 0);
+	if (!conf->tmpfolio)
 		goto out;
 
 	conf->geo = geo;
-- 
2.39.2
From nobody Mon Feb 9 04:27:43 2026
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com
Subject: [PATCH 06/15] md/raid1,raid10: use folio for sync path IO
Date: Wed, 17 Dec 2025 20:00:04 +0800
Message-Id: <20251217120013.2616531-7-linan666@huaweicloud.com>
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>

From: Li Nan <linan666@huaweicloud.com>

Convert all IO on the sync path to use folios, and rename page-related
identifiers to match. Some now-unnecessary while and for loops are
retained to minimize code changes; they will be cleaned up in a
subsequent patch.

Signed-off-by: Li Nan <linan666@huaweicloud.com>
---
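A sketch of the core idea for reviewers (illustrative, not part of the
patch): with one high-order folio per resync bio, code that used to
walk RESYNC_PAGES page-sized chunks with an index (idx++) instead does
byte-offset arithmetic within a single folio (off += s << 9), as
fix_recovery_read_error() does below. The copy_range() helper is
hypothetical, and it assumes 'sect' starts at 0 relative to the folio,
matching the caller in the patch.

	/* Hypothetical: copy a range between two rdevs through one folio. */
	static int copy_range(struct md_rdev *from, struct md_rdev *to,
			      struct folio *folio, sector_t sect, int sectors)
	{
		while (sectors) {
			int s = min(sectors, RESYNC_SECTORS);	/* <= 128 sectors */

			/* read s sectors at byte offset sect << 9 in the folio... */
			if (!sync_folio_io(from, sect, s << 9, sect << 9, folio,
					   REQ_OP_READ, false))
				return -EIO;
			/* ...then write the same folio segment back out */
			if (!sync_folio_io(to, sect, s << 9, sect << 9, folio,
					   REQ_OP_WRITE, false))
				return -EIO;
			sectors -= s;
			sect += s;
		}
		return 0;
	}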
*/ -static inline struct resync_pages *get_resync_pages(struct bio *bio) +static inline struct resync_folio *get_resync_folio(struct bio *bio) { return bio->bi_private; } =20 /* generally called after bio_reset() for reseting bvec */ -static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages= *rp, +static void md_bio_reset_resync_folio(struct bio *bio, struct resync_folio= *rf, int size) { - int idx =3D 0; - /* initialize bvec table again */ do { - struct page *page =3D resync_fetch_page(rp, idx); - int len =3D min_t(int, size, PAGE_SIZE); + struct folio *folio =3D resync_fetch_folio(rf); + int len =3D min_t(int, size, RESYNC_BLOCK_SIZE); =20 - if (WARN_ON(!bio_add_page(bio, page, len, 0))) { + if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); return; } - - size -=3D len; - } while (idx++ < RESYNC_PAGES && size > 0); + } while (0); } =20 =20 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 43453f1a04f4..370bdecf5487 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -120,11 +120,11 @@ static void remove_serial(struct md_rdev *rdev, secto= r_t lo, sector_t hi) =20 /* * for resync bio, r1bio pointer can be retrieved from the per-bio - * 'struct resync_pages'. + * 'struct resync_folio'. */ static inline struct r1bio *get_resync_r1bio(struct bio *bio) { - return get_resync_pages(bio)->raid_bio; + return get_resync_folio(bio)->raid_bio; } =20 static void *r1bio_pool_alloc(gfp_t gfp_flags, struct r1conf *conf) @@ -146,70 +146,69 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void = *data) struct r1conf *conf =3D data; struct r1bio *r1_bio; struct bio *bio; - int need_pages; + int need_folio; int j; - struct resync_pages *rps; + struct resync_folio *rfs; =20 r1_bio =3D r1bio_pool_alloc(gfp_flags, conf); if (!r1_bio) return NULL; =20 - rps =3D kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_pages), + rfs =3D kmalloc_array(conf->raid_disks * 2, sizeof(struct resync_folio), gfp_flags); - if (!rps) + if (!rfs) goto out_free_r1bio; =20 /* * Allocate bios : 1 for reading, n-1 for writing */ for (j =3D conf->raid_disks * 2; j-- ; ) { - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r1_bio->bios[j] =3D bio; } /* - * Allocate RESYNC_PAGES data pages and attach them to - * the first bio. + * Allocate data folio and attach them to the first bio. * If this is a user-requested check/repair, allocate - * RESYNC_PAGES for each bio. + * folio for each bio. 
*/ if (test_bit(MD_RECOVERY_REQUESTED, &conf->mddev->recovery)) - need_pages =3D conf->raid_disks * 2; + need_folio =3D conf->raid_disks * 2; else - need_pages =3D 1; + need_folio =3D 1; for (j =3D 0; j < conf->raid_disks * 2; j++) { - struct resync_pages *rp =3D &rps[j]; + struct resync_folio *rf =3D &rfs[j]; =20 bio =3D r1_bio->bios[j]; =20 - if (j < need_pages) { - if (resync_alloc_pages(rp, gfp_flags)) - goto out_free_pages; + if (j < need_folio) { + if (resync_alloc_folio(rf, gfp_flags)) + goto out_free_folio; } else { - memcpy(rp, &rps[0], sizeof(*rp)); - resync_get_all_pages(rp); + memcpy(rf, &rfs[0], sizeof(*rf)); + resync_get_all_folio(rf); } =20 - rp->raid_bio =3D r1_bio; - bio->bi_private =3D rp; + rf->raid_bio =3D r1_bio; + bio->bi_private =3D rf; } =20 r1_bio->master_bio =3D NULL; =20 return r1_bio; =20 -out_free_pages: +out_free_folio: while (--j >=3D 0) - resync_free_pages(&rps[j]); + resync_free_folio(&rfs[j]); =20 out_free_bio: while (++j < conf->raid_disks * 2) { bio_uninit(r1_bio->bios[j]); kfree(r1_bio->bios[j]); } - kfree(rps); + kfree(rfs); =20 out_free_r1bio: rbio_pool_free(r1_bio, data); @@ -221,17 +220,17 @@ static void r1buf_pool_free(void *__r1_bio, void *dat= a) struct r1conf *conf =3D data; int i; struct r1bio *r1bio =3D __r1_bio; - struct resync_pages *rp =3D NULL; + struct resync_folio *rf =3D NULL; =20 for (i =3D conf->raid_disks * 2; i--; ) { - rp =3D get_resync_pages(r1bio->bios[i]); - resync_free_pages(rp); + rf =3D get_resync_folio(r1bio->bios[i]); + resync_free_folio(rf); bio_uninit(r1bio->bios[i]); kfree(r1bio->bios[i]); } =20 - /* resync pages array stored in the 1st bio's .bi_private */ - kfree(rp); + /* resync folio stored in the 1st bio's .bi_private */ + kfree(rf); =20 rbio_pool_free(r1bio, data); } @@ -2095,10 +2094,10 @@ static void end_sync_write(struct bio *bio) put_sync_write_buf(r1_bio); } =20 -static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, - int sectors, struct page *page, blk_opf_t rw) +static int r1_sync_folio_io(struct md_rdev *rdev, sector_t sector, int sec= tors, + int off, struct folio *folio, blk_opf_t rw) { - if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) + if (sync_folio_io(rdev, sector, sectors << 9, off, folio, rw, false)) /* success */ return 1; if (rw =3D=3D REQ_OP_WRITE) { @@ -2129,10 +2128,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio) struct mddev *mddev =3D r1_bio->mddev; struct r1conf *conf =3D mddev->private; struct bio *bio =3D r1_bio->bios[r1_bio->read_disk]; - struct page **pages =3D get_resync_pages(bio)->pages; + struct folio *folio =3D get_resync_folio(bio)->folio; sector_t sect =3D r1_bio->sector; int sectors =3D r1_bio->sectors; - int idx =3D 0; + int off =3D 0; struct md_rdev *rdev; =20 rdev =3D conf->mirrors[r1_bio->read_disk].rdev; @@ -2162,9 +2161,8 @@ static int fix_sync_read_error(struct r1bio *r1_bio) * active, and resync is currently active */ rdev =3D conf->mirrors[d].rdev; - if (sync_page_io(rdev, sect, s<<9, - pages[idx], - REQ_OP_READ, false)) { + if (sync_folio_io(rdev, sect, s<<9, off, folio, + REQ_OP_READ, false)) { success =3D 1; break; } @@ -2197,7 +2195,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) /* Try next page */ sectors -=3D s; sect +=3D s; - idx++; + off +=3D s << 9; continue; } =20 @@ -2210,8 +2208,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (r1_bio->bios[d]->bi_end_io !=3D end_sync_read) continue; rdev =3D conf->mirrors[d].rdev; - if (r1_sync_page_io(rdev, sect, s, - pages[idx], + if (r1_sync_folio_io(rdev, sect, 
s, off, folio, REQ_OP_WRITE) =3D=3D 0) { r1_bio->bios[d]->bi_end_io =3D NULL; rdev_dec_pending(rdev, mddev); @@ -2225,14 +2222,13 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (r1_bio->bios[d]->bi_end_io !=3D end_sync_read) continue; rdev =3D conf->mirrors[d].rdev; - if (r1_sync_page_io(rdev, sect, s, - pages[idx], + if (r1_sync_folio_io(rdev, sect, s, off, folio, REQ_OP_READ) !=3D 0) atomic_add(s, &rdev->corrected_errors); } sectors -=3D s; sect +=3D s; - idx ++; + off +=3D s << 9; } set_bit(R1BIO_Uptodate, &r1_bio->state); bio->bi_status =3D 0; @@ -2252,14 +2248,12 @@ static void process_checks(struct r1bio *r1_bio) struct r1conf *conf =3D mddev->private; int primary; int i; - int vcnt; =20 /* Fix variable parts of all bios */ - vcnt =3D (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); for (i =3D 0; i < conf->raid_disks * 2; i++) { blk_status_t status; struct bio *b =3D r1_bio->bios[i]; - struct resync_pages *rp =3D get_resync_pages(b); + struct resync_folio *rf =3D get_resync_folio(b); if (b->bi_end_io !=3D end_sync_read) continue; /* fixup the bio for reuse, but preserve errno */ @@ -2269,11 +2263,11 @@ static void process_checks(struct r1bio *r1_bio) b->bi_iter.bi_sector =3D r1_bio->sector + conf->mirrors[i].rdev->data_offset; b->bi_end_io =3D end_sync_read; - rp->raid_bio =3D r1_bio; - b->bi_private =3D rp; + rf->raid_bio =3D r1_bio; + b->bi_private =3D rf; =20 /* initialize bvec table again */ - md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); + md_bio_reset_resync_folio(b, rf, r1_bio->sectors << 9); } for (primary =3D 0; primary < conf->raid_disks * 2; primary++) if (r1_bio->bios[primary]->bi_end_io =3D=3D end_sync_read && @@ -2284,44 +2278,30 @@ static void process_checks(struct r1bio *r1_bio) } r1_bio->read_disk =3D primary; for (i =3D 0; i < conf->raid_disks * 2; i++) { - int j =3D 0; struct bio *pbio =3D r1_bio->bios[primary]; struct bio *sbio =3D r1_bio->bios[i]; blk_status_t status =3D sbio->bi_status; - struct page **ppages =3D get_resync_pages(pbio)->pages; - struct page **spages =3D get_resync_pages(sbio)->pages; - struct bio_vec *bi; - int page_len[RESYNC_PAGES] =3D { 0 }; - struct bvec_iter_all iter_all; + struct folio *pfolio =3D get_resync_folio(pbio)->folio; + struct folio *sfolio =3D get_resync_folio(sbio)->folio; =20 if (sbio->bi_end_io !=3D end_sync_read) continue; /* Now we can 'fixup' the error value */ sbio->bi_status =3D 0; =20 - bio_for_each_segment_all(bi, sbio, iter_all) - page_len[j++] =3D bi->bv_len; - - if (!status) { - for (j =3D vcnt; j-- ; ) { - if (memcmp(page_address(ppages[j]), - page_address(spages[j]), - page_len[j])) - break; - } - } else - j =3D 0; - if (j >=3D 0) + if (status || memcmp(folio_address(pfolio), + folio_address(sfolio), + r1_bio->sectors << 9)) { atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); - if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) - && !status)) { - /* No need to write to this device. */ - sbio->bi_end_io =3D NULL; - rdev_dec_pending(conf->mirrors[i].rdev, mddev); - continue; + if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + bio_copy_data(sbio, pbio); + continue; + } } =20 - bio_copy_data(sbio, pbio); + /* No need to write to this device. 
*/ + sbio->bi_end_io =3D NULL; + rdev_dec_pending(conf->mirrors[i].rdev, mddev); } } =20 @@ -2446,9 +2426,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) if (rdev && !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - r1_sync_page_io(rdev, sect, s, - folio_page(conf->tmpfolio, 0), - REQ_OP_WRITE); + r1_sync_folio_io(rdev, sect, s, 0, + conf->tmpfolio, REQ_OP_WRITE); rdev_dec_pending(rdev, mddev); } } @@ -2461,9 +2440,8 @@ static void fix_read_error(struct r1conf *conf, struc= t r1bio *r1_bio) if (rdev && !test_bit(Faulty, &rdev->flags)) { atomic_inc(&rdev->nr_pending); - if (r1_sync_page_io(rdev, sect, s, - folio_page(conf->tmpfolio, 0), - REQ_OP_READ)) { + if (r1_sync_folio_io(rdev, sect, s, 0, + conf->tmpfolio, REQ_OP_READ)) { atomic_add(s, &rdev->corrected_errors); pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg= )\n", mdname(mddev), s, @@ -2799,7 +2777,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, int good_sectors =3D RESYNC_SECTORS; int min_bad =3D 0; /* number of sectors that are bad in all devices */ int idx =3D sector_to_idx(sector_nr); - int page_idx =3D 0; =20 if (!mempool_initialized(&conf->r1buf_pool)) if (init_resync(conf)) @@ -3003,8 +2980,8 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, nr_sectors =3D 0; sync_blocks =3D 0; do { - struct page *page; - int len =3D PAGE_SIZE; + struct folio *folio; + int len =3D RESYNC_BLOCK_SIZE; if (sector_nr + (len>>9) > max_sector) len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) @@ -3020,24 +2997,24 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, } =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { - struct resync_pages *rp; + struct resync_folio *rf; =20 bio =3D r1_bio->bios[i]; - rp =3D get_resync_pages(bio); + rf =3D get_resync_folio(bio); if (bio->bi_end_io) { - page =3D resync_fetch_page(rp, page_idx); + folio =3D resync_fetch_folio(rf); =20 /* * won't fail because the vec table is big * enough to hold all these pages */ - __bio_add_page(bio, page, len, 0); + bio_add_folio_nofail(bio, folio, len, 0); } } nr_sectors +=3D len>>9; sector_nr +=3D len>>9; sync_blocks -=3D (len>>9); - } while (++page_idx < RESYNC_PAGES); + } while (0); =20 r1_bio->sectors =3D nr_sectors; =20 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 09238dc9cde6..c93706806358 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -96,11 +96,11 @@ static void end_reshape(struct r10conf *conf); =20 /* * for resync bio, r10bio pointer can be retrieved from the per-bio - * 'struct resync_pages'. + * 'struct resync_folio'. 
*/ static inline struct r10bio *get_resync_r10bio(struct bio *bio) { - return get_resync_pages(bio)->raid_bio; + return get_resync_folio(bio)->raid_bio; } =20 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) @@ -133,8 +133,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) struct r10bio *r10_bio; struct bio *bio; int j; - int nalloc, nalloc_rp; - struct resync_pages *rps; + int nalloc, nalloc_rf; + struct resync_folio *rfs; =20 r10_bio =3D r10bio_pool_alloc(gfp_flags, conf); if (!r10_bio) @@ -148,58 +148,57 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void= *data) =20 /* allocate once for all bios */ if (!conf->have_replacement) - nalloc_rp =3D nalloc; + nalloc_rf =3D nalloc; else - nalloc_rp =3D nalloc * 2; - rps =3D kmalloc_array(nalloc_rp, sizeof(struct resync_pages), gfp_flags); - if (!rps) + nalloc_rf =3D nalloc * 2; + rfs =3D kmalloc_array(nalloc_rf, sizeof(struct resync_folio), gfp_flags); + if (!rfs) goto out_free_r10bio; =20 /* * Allocate bios. */ for (j =3D nalloc ; j-- ; ) { - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r10_bio->devs[j].bio =3D bio; if (!conf->have_replacement) continue; - bio =3D bio_kmalloc(RESYNC_PAGES, gfp_flags); + bio =3D bio_kmalloc(1, gfp_flags); if (!bio) goto out_free_bio; - bio_init_inline(bio, NULL, RESYNC_PAGES, 0); + bio_init_inline(bio, NULL, 1, 0); r10_bio->devs[j].repl_bio =3D bio; } /* - * Allocate RESYNC_PAGES data pages and attach them - * where needed. + * Allocate data folio and attach them where needed. */ for (j =3D 0; j < nalloc; j++) { struct bio *rbio =3D r10_bio->devs[j].repl_bio; - struct resync_pages *rp, *rp_repl; + struct resync_folio *rf, *rf_repl; =20 - rp =3D &rps[j]; + rf =3D &rfs[j]; if (rbio) - rp_repl =3D &rps[nalloc + j]; + rf_repl =3D &rfs[nalloc + j]; =20 bio =3D r10_bio->devs[j].bio; =20 if (!j || test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) { - if (resync_alloc_pages(rp, gfp_flags)) + if (resync_alloc_folio(rf, gfp_flags)) goto out_free_pages; } else { - memcpy(rp, &rps[0], sizeof(*rp)); - resync_get_all_pages(rp); + memcpy(rf, &rfs[0], sizeof(*rf)); + resync_get_all_folio(rf); } =20 - rp->raid_bio =3D r10_bio; - bio->bi_private =3D rp; + rf->raid_bio =3D r10_bio; + bio->bi_private =3D rf; if (rbio) { - memcpy(rp_repl, rp, sizeof(*rp)); - rbio->bi_private =3D rp_repl; + memcpy(rf_repl, rf, sizeof(*rf)); + rbio->bi_private =3D rf_repl; } } =20 @@ -207,7 +206,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) =20 out_free_pages: while (--j >=3D 0) - resync_free_pages(&rps[j]); + resync_free_folio(&rfs[j]); =20 j =3D 0; out_free_bio: @@ -219,7 +218,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) bio_uninit(r10_bio->devs[j].repl_bio); kfree(r10_bio->devs[j].repl_bio); } - kfree(rps); + kfree(rfs); out_free_r10bio: rbio_pool_free(r10_bio, conf); return NULL; @@ -230,14 +229,14 @@ static void r10buf_pool_free(void *__r10_bio, void *d= ata) struct r10conf *conf =3D data; struct r10bio *r10bio =3D __r10_bio; int j; - struct resync_pages *rp =3D NULL; + struct resync_folio *rf =3D NULL; =20 for (j =3D conf->copies; j--; ) { struct bio *bio =3D r10bio->devs[j].bio; =20 if (bio) { - rp =3D get_resync_pages(bio); - resync_free_pages(rp); + rf =3D get_resync_folio(bio); + resync_free_folio(rf); bio_uninit(bio); kfree(bio); } @@ -250,7 +249,7 @@ static void r10buf_pool_free(void *__r10_bio, void *dat= a) } 
=20
 /* resync pages array stored in the 1st bio's .bi_private */
- kfree(rp);
+ kfree(rf);
=20
 rbio_pool_free(r10bio, conf);
 }
@@ -2342,8 +2341,7 @@ static void sync_request_write(struct mddev *mddev, s=
truct r10bio *r10_bio)
 struct r10conf *conf =3D mddev->private;
 int i, first;
 struct bio *tbio, *fbio;
- int vcnt;
- struct page **tpages, **fpages;
+ struct folio *tfolio, *ffolio;
=20
 atomic_set(&r10_bio->remaining, 1);
=20
@@ -2359,14 +2357,13 @@ static void sync_request_write(struct mddev *mddev,=
 struct r10bio *r10_bio)
 fbio =3D r10_bio->devs[i].bio;
 fbio->bi_iter.bi_size =3D r10_bio->sectors << 9;
 fbio->bi_iter.bi_idx =3D 0;
- fpages =3D get_resync_pages(fbio)->pages;
+ ffolio =3D get_resync_folio(fbio)->folio;
=20
- vcnt =3D (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
 /* now find blocks with errors */
 for (i=3D0 ; i < conf->copies ; i++) {
- int j, d;
+ int d;
 struct md_rdev *rdev;
- struct resync_pages *rp;
+ struct resync_folio *rf;
=20
 tbio =3D r10_bio->devs[i].bio;
=20
@@ -2375,31 +2372,23 @@ static void sync_request_write(struct mddev *mddev,=
 struct r10bio *r10_bio)
 if (i =3D=3D first)
 continue;
=20
- tpages =3D get_resync_pages(tbio)->pages;
+ tfolio =3D get_resync_folio(tbio)->folio;
 d =3D r10_bio->devs[i].devnum;
 rdev =3D conf->mirrors[d].rdev;
 if (!r10_bio->devs[i].bio->bi_status) {
 /* We know that the bi_io_vec layout is the same for
 * both 'first' and 'i', so we just compare them.
- * All vec entries are PAGE_SIZE;
 */
- int sectors =3D r10_bio->sectors;
- for (j =3D 0; j < vcnt; j++) {
- int len =3D PAGE_SIZE;
- if (sectors < (len / 512))
- len =3D sectors * 512;
- if (memcmp(page_address(fpages[j]),
- page_address(tpages[j]),
- len))
- break;
- sectors -=3D len/512;
- }
- if (j =3D=3D vcnt)
- continue;
- atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
- if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
- /* Don't fix anything.
- */
- continue;
+ if (memcmp(folio_address(ffolio),
+ folio_address(tfolio),
+ r10_bio->sectors << 9) =3D=3D 0)
+ continue;
+ atomic64_add(r10_bio->sectors,
+ &mddev->resync_mismatches);
+ if (test_bit(MD_RECOVERY_CHECK,
+ &mddev->recovery))
+ /* Don't fix anything. */
+ continue;
 } else if (test_bit(FailFast, &rdev->flags)) {
 /* Just give up on this device */
 md_error(rdev->mddev, rdev);
@@ -2410,13 +2399,13 @@ static void sync_request_write(struct mddev *mddev,=
 struct r10bio *r10_bio)
 * First we need to fixup bv_offset, bv_len and
 * bi_vecs, as the read request might have corrupted these
 */
- rp =3D get_resync_pages(tbio);
+ rf =3D get_resync_folio(tbio);
 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
=20
- md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+ md_bio_reset_resync_folio(tbio, rf, fbio->bi_iter.bi_size);
=20
- rp->raid_bio =3D r10_bio;
- tbio->bi_private =3D rp;
+ rf->raid_bio =3D r10_bio;
+ tbio->bi_private =3D rf;
 tbio->bi_iter.bi_sector =3D r10_bio->devs[i].addr;
 tbio->bi_end_io =3D end_sync_write;
=20
@@ -2476,10 +2465,9 @@ static void fix_recovery_read_error(struct r10bio *r=
10_bio)
 struct bio *bio =3D r10_bio->devs[0].bio;
 sector_t sect =3D 0;
 int sectors =3D r10_bio->sectors;
- int idx =3D 0;
 int dr =3D r10_bio->devs[0].devnum;
 int dw =3D r10_bio->devs[1].devnum;
- struct page **pages =3D get_resync_pages(bio)->pages;
+ struct folio *folio =3D get_resync_folio(bio)->folio;
=20
 while (sectors) {
 int s =3D sectors;
@@ -2492,19 +2480,21 @@ static void fix_recovery_read_error(struct r10bio *=
r10_bio)
=20
 rdev =3D conf->mirrors[dr].rdev;
 addr =3D r10_bio->devs[0].addr + sect;
- ok =3D sync_page_io(rdev,
- addr,
- s << 9,
- pages[idx],
- REQ_OP_READ, false);
+ ok =3D sync_folio_io(rdev,
+ addr,
+ s << 9,
+ sect << 9,
+ folio,
+ REQ_OP_READ, false);
 if (ok) {
 rdev =3D conf->mirrors[dw].rdev;
 addr =3D r10_bio->devs[1].addr + sect;
- ok =3D sync_page_io(rdev,
- addr,
- s << 9,
- pages[idx],
- REQ_OP_WRITE, false);
+ ok =3D sync_folio_io(rdev,
+ addr,
+ s << 9,
+ sect << 9,
+ folio,
+ REQ_OP_WRITE, false);
 if (!ok) {
 set_bit(WriteErrorSeen, &rdev->flags);
 if (!test_and_set_bit(WantReplacement,
@@ -2539,7 +2529,6 @@ static void fix_recovery_read_error(struct r10bio *r1=
0_bio)
=20
 sectors -=3D s;
 sect +=3D s;
- idx++;
 }
 }
=20
@@ -3174,7 +3163,6 @@ static sector_t raid10_sync_request(struct mddev *mdd=
ev, sector_t sector_nr,
 int max_sync =3D RESYNC_SECTORS;
 sector_t sync_blocks;
 sector_t chunk_mask =3D conf->geo.chunk_mask;
- int page_idx =3D 0;
=20
 /*
 * Allow skipping a full rebuild for incremental assembly
@@ -3277,7 +3265,7 @@ static sector_t raid10_sync_request(struct mddev *mdd=
ev, sector_t sector_nr,
 * with 2 bios in each, that correspond to the bios in the main one.
 * In this case, the subordinate r10bios link back through a
 * borrowed master_bio pointer, and the counter in the master
- * includes a ref from each subordinate.
+ * includes one ref from each subordinate.
 */
 /* First, we decide what to do and set ->bi_end_io
 * To end_sync_read if we want to read, and
@@ -3642,25 +3630,26 @@ static sector_t raid10_sync_request(struct mddev *m=
ddev, sector_t sector_nr,
 if (sector_nr + max_sync < max_sector)
 max_sector =3D sector_nr + max_sync;
 do {
- struct page *page;
- int len =3D PAGE_SIZE;
+ int len =3D RESYNC_BLOCK_SIZE;
+
 if (sector_nr + (len>>9) > max_sector)
 len =3D (max_sector - sector_nr) << 9;
 if (len =3D=3D 0)
 break;
 for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) {
- struct resync_pages *rp =3D get_resync_pages(bio);
- page =3D resync_fetch_page(rp, page_idx);
- if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
+ struct resync_folio *rf =3D get_resync_folio(bio);
+ struct folio *folio =3D resync_fetch_folio(rf);
+
+ if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) {
 bio->bi_status =3D BLK_STS_RESOURCE;
 bio_endio(bio);
 *skipped =3D 1;
- return max_sync;
+ return len >> 9;
 }
 }
 nr_sectors +=3D len>>9;
 sector_nr +=3D len>>9;
- } while (++page_idx < RESYNC_PAGES);
+ } while (0);
 r10_bio->sectors =3D nr_sectors;
=20
 if (mddev_is_clustered(mddev) &&
@@ -4578,7 +4567,7 @@ static sector_t reshape_request(struct mddev *mddev, =
sector_t sector_nr,
 int *skipped)
 {
 /* We simply copy at most one chunk (smallest of old and new)
- * at a time, possibly less if that exceeds RESYNC_PAGES,
+ * at a time, possibly less if that exceeds RESYNC_BLOCK_SIZE,
 * or we hit a bad block or something.
 * This might mean we pause for normal IO in the middle of
 * a chunk, but that is not a problem as mddev->reshape_position
@@ -4618,14 +4607,13 @@ static sector_t reshape_request(struct mddev *mddev=
, sector_t sector_nr,
 struct r10bio *r10_bio;
 sector_t next, safe, last;
 int max_sectors;
- int nr_sectors;
 int s;
 struct md_rdev *rdev;
 int need_flush =3D 0;
 struct bio *blist;
 struct bio *bio, *read_bio;
 int sectors_done =3D 0;
- struct page **pages;
+ struct folio *folio;
=20
 if (sector_nr =3D=3D 0) {
 /* If restarting in the middle, skip the initial sectors */
@@ -4741,7 +4729,7 @@ static sector_t reshape_request(struct mddev *mddev, =
sector_t sector_nr,
 return sectors_done;
 }
=20
- read_bio =3D bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
+ read_bio =3D bio_alloc_bioset(rdev->bdev, 1, REQ_OP_READ,
 GFP_KERNEL, &mddev->bio_set);
 read_bio->bi_iter.bi_sector =3D (r10_bio->devs[r10_bio->read_slot].addr
 + rdev->data_offset);
@@ -4805,32 +4793,24 @@ static sector_t reshape_request(struct mddev *mddev=
, sector_t sector_nr,
 blist =3D b;
 }
=20
- /* Now add as many pages as possible to all of these bios. */
+ /* Now add the folio to all of these bios.
 */
=20
- nr_sectors =3D 0;
- pages =3D get_resync_pages(r10_bio->devs[0].bio)->pages;
- for (s =3D 0 ; s < max_sectors; s +=3D PAGE_SIZE >> 9) {
- struct page *page =3D pages[s / (PAGE_SIZE >> 9)];
- int len =3D (max_sectors - s) << 9;
- if (len > PAGE_SIZE)
- len =3D PAGE_SIZE;
- for (bio =3D blist; bio ; bio =3D bio->bi_next) {
- if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
- bio->bi_status =3D BLK_STS_RESOURCE;
- bio_endio(bio);
- return sectors_done;
- }
+ folio =3D get_resync_folio(r10_bio->devs[0].bio)->folio;
+ for (bio =3D blist; bio ; bio =3D bio->bi_next) {
+ if (WARN_ON(!bio_add_folio(bio, folio, max_sectors << 9, 0))) {
+ bio->bi_status =3D BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return sectors_done;
 }
- sector_nr +=3D len >> 9;
- nr_sectors +=3D len >> 9;
 }
- r10_bio->sectors =3D nr_sectors;
+ sector_nr +=3D max_sectors;
+ r10_bio->sectors =3D max_sectors;
=20
 /* Now submit the read */
 atomic_inc(&r10_bio->remaining);
 read_bio->bi_next =3D NULL;
 submit_bio_noacct(read_bio);
- sectors_done +=3D nr_sectors;
+ sectors_done +=3D max_sectors;
 if (sector_nr <=3D last)
 goto read_more;
=20
@@ -4932,8 +4911,8 @@ static int handle_reshape_read_error(struct mddev *md=
dev,
 struct r10conf *conf =3D mddev->private;
 struct r10bio *r10b;
 int slot =3D 0;
- int idx =3D 0;
- struct page **pages;
+ int sect =3D 0;
+ struct folio *folio;
=20
 r10b =3D kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
 if (!r10b) {
@@ -4941,8 +4920,8 @@ static int handle_reshape_read_error(struct mddev *md=
dev,
 return -ENOMEM;
 }
=20
- /* reshape IOs share pages from .devs[0].bio */
- pages =3D get_resync_pages(r10_bio->devs[0].bio)->pages;
+ /* reshape IOs share the folio from .devs[0].bio */
+ folio =3D get_resync_folio(r10_bio->devs[0].bio)->folio;
=20
 r10b->sector =3D r10_bio->sector;
 __raid10_find_phys(&conf->prev, r10b);
@@ -4958,19 +4937,19 @@ static int handle_reshape_read_error(struct mddev *=
mddev,
 while (!success) {
 int d =3D r10b->devs[slot].devnum;
 struct md_rdev *rdev =3D conf->mirrors[d].rdev;
- sector_t addr;
 if (rdev =3D=3D NULL ||
 test_bit(Faulty, &rdev->flags) ||
 !test_bit(In_sync, &rdev->flags))
 goto failed;
=20
- addr =3D r10b->devs[slot].addr + idx * PAGE_SIZE;
 atomic_inc(&rdev->nr_pending);
- success =3D sync_page_io(rdev,
- addr,
- s << 9,
- pages[idx],
- REQ_OP_READ, false);
+ success =3D sync_folio_io(rdev,
+ r10b->devs[slot].addr +
+ sect,
+ s << 9,
+ sect << 9,
+ folio,
+ REQ_OP_READ, false);
 rdev_dec_pending(rdev, mddev);
 if (success)
 break;
@@ -4989,7 +4968,7 @@ static int handle_reshape_read_error(struct mddev *md=
dev,
 return -EIO;
 }
 sectors -=3D s;
- idx++;
+ sect +=3D s;
 }
 kfree(r10b);
 return 0;
--=20
2.39.2

From nobody Mon Feb 9 04:27:43 2026
Received: from dggsgout11.his.huawei.com (dggsgout11.his.huawei.com [45.249.212.51])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by smtp.subspace.kernel.org (Postfix) with ESMTPS id AFC1926056C;
	Wed, 17 Dec 2025 12:11:12 +0000 (UTC)
Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.51
ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
	t=1765973475; cv=none;
	b=K0AH0oaoE6bxN6E2fS2IgulgnYpl/D10c2Y/RVsqNgFdDnt12dyB4ltb201SqXMELl2uCT7FxgTm7bmMEvt9iX8BI+s5ZjwuUuY8xOdJ6MzUuhAwFGIBsgpUfmUDdWWstp98Lp6KpHFVDjt2ggw0/P5be/uXCDzsBg+Q3ZfvBtY=
ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org;
	s=arc-20240116; t=1765973475; c=relaxed/simple;
	bh=LElKwJXGVrz7yf90h6WhX2NYtZf9k6hwgGDS8UuMQsM=;
	h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References:
MIME-Version; b=PQxoaaFIhQA7rAAcGL8TxbEjhhM1/oQv53kwvXKa+PxBrC9IiFOH63Sn9KOiHZvzSM1DqoB5k2QI6TCjTPt4uHWAzELBclv/gASbLyC6M1J3OkZKoKUWplCrpliiHw2i1r56WFSDcgImGjE28HeILLpD3rWgd0JfTy4wqNtj/h8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.51 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout11.his.huawei.com (SkyGuard) with ESMTPS id 4dWXgl5fTTzYQvJh; Wed, 17 Dec 2025 20:10:43 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 9E74340576; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S11; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 07/15] md: Clean up folio sync support related code Date: Wed, 17 Dec 2025 20:00:05 +0800 Message-Id: <20251217120013.2616531-8-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S11 X-Coremail-Antispam: 1UD129KBjvJXoWxAF4rJr18CryUKr18uFWfKrg_yoW5urWkpa 9rGrySvayrKF45ZF4Dtw4UAa1Fk34Yga4UCF4fua93uF13ZFyDKF4jqa48Xr1DZF95Ca4F qF93Ja1UuF45tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan 1. Remove resync_get_all_folio() and invoke folio_get() directly instead. 2. Clean up redundant while(0) loop in md_bio_reset_resync_folio(). 3. Clean up bio variable by directly referencing r10_bio->devs[j].bio instead in r1buf_pool_alloc() and r10buf_pool_alloc(). 4. Clean up RESYNC_PAGES. 
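
For illustration, a minimal sketch of the ownership rule these cleanups
keep intact (names as in r10buf_pool_alloc(); this is a simplified
sketch, not part of the diff, and the real code also allocates
per-device folios during MD_RECOVERY_SYNC): the first resync_folio owns
the allocation, every other one shares it and pins it with folio_get(),
so teardown is one folio_put() per resync_folio.

	for (j = 0; j < nalloc; j++) {
		struct resync_folio *rf = &rfs[j];

		if (j == 0) {
			if (resync_alloc_folio(rf, gfp_flags))
				goto out_free_pages;
		} else {
			memcpy(rf, &rfs[0], sizeof(*rf));
			folio_get(rf->folio);	/* one ref per sharing bio */
		}
	}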
Signed-off-by: Li Nan Reviewed-by: Xiao Ni --- drivers/md/raid1-10.c | 22 ++++++---------------- drivers/md/raid1.c | 6 ++---- drivers/md/raid10.c | 6 ++---- 3 files changed, 10 insertions(+), 24 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index b8f2cc32606f..568ab002691f 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Maximum size of each resync request */ #define RESYNC_BLOCK_SIZE (64*1024) -#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) =20 /* @@ -56,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio= *rf) folio_put(rf->folio); } =20 -static inline void resync_get_all_folio(struct resync_folio *rf) -{ - folio_get(rf->folio); -} - static inline struct folio *resync_fetch_folio(struct resync_folio *rf) { return rf->folio; @@ -80,16 +74,12 @@ static void md_bio_reset_resync_folio(struct bio *bio, = struct resync_folio *rf, int size) { /* initialize bvec table again */ - do { - struct folio *folio =3D resync_fetch_folio(rf); - int len =3D min_t(int, size, RESYNC_BLOCK_SIZE); - - if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { - bio->bi_status =3D BLK_STS_RESOURCE; - bio_endio(bio); - return; - } - } while (0); + if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf), + min_t(int, size, RESYNC_BLOCK_SIZE), + 0))) { + bio->bi_status =3D BLK_STS_RESOURCE; + bio_endio(bio); + } } =20 =20 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 370bdecf5487..f01bab41da95 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -181,18 +181,16 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void = *data) for (j =3D 0; j < conf->raid_disks * 2; j++) { struct resync_folio *rf =3D &rfs[j]; =20 - bio =3D r1_bio->bios[j]; - if (j < need_folio) { if (resync_alloc_folio(rf, gfp_flags)) goto out_free_folio; } else { memcpy(rf, &rfs[0], sizeof(*rf)); - resync_get_all_folio(rf); + folio_get(rf->folio); } =20 rf->raid_bio =3D r1_bio; - bio->bi_private =3D rf; + r1_bio->bios[j]->bi_private =3D rf; } =20 r1_bio->master_bio =3D NULL; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c93706806358..a03afa9a6a5b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -183,19 +183,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void= *data) if (rbio) rf_repl =3D &rfs[nalloc + j]; =20 - bio =3D r10_bio->devs[j].bio; - if (!j || test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery)) { if (resync_alloc_folio(rf, gfp_flags)) goto out_free_pages; } else { memcpy(rf, &rfs[0], sizeof(*rf)); - resync_get_all_folio(rf); + folio_get(rf->folio); } =20 rf->raid_bio =3D r10_bio; - bio->bi_private =3D rf; + r10_bio->devs[j].bio->bi_private =3D rf; if (rbio) { memcpy(rf_repl, rf, sizeof(*rf)); rbio->bi_private =3D rf_repl; --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CC80532A3C2; Wed, 17 Dec 2025 12:11:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973481; cv=none; b=V/isd8RtDpo65Eew/w6n5hkiGi+TmrKdzqgbWROmZfUEZG0bJDQLsbJvVhbxPOkiYViOVicHnaiMqXjurl4LBLFO92j8dJPmwN0UCfMFWxkQgRYlTcqlSlEvHS4zFvHwB3xUX6BKMXdOfpnGziEaJkMwKWmkXOUGKkUWUbECXcY= 
ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973481; c=relaxed/simple; bh=89xAFIJo2kmpQ+WPh636tStbkdYmmswMN8cLscxBEYM=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=ZBEuB7V9aTbJaCDN0b7RbWfltD5N0ynlPVTssgZBLPzLldMudFlKLuq00u8fGdSgtMCu6BVMbuwy9mrqtfs9a+yyEKb5PRn5/55U8axN59Xq98+YhLdBHyQspvioAxPDxvyv6dqw0yJWL5Y0onhYMc/hz4T1SI4phvd0Nued1V8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh54STFzKHN4D; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id ADBFE40575; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S12; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 08/15] md/raid1: clean up useless sync_blocks handling in raid1_sync_request Date: Wed, 17 Dec 2025 20:00:06 +0800 Message-Id: <20251217120013.2616531-9-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S12 X-Coremail-Antispam: 1UD129KBjvJXoW7CF43Ar43JF1kCw4DGr1rJFb_yoW8AF17pa 17Jryag345WFW5ZasxAr1UCFyFkFy7trWUJryfW3s7WFZ7Gr97CF48X3WagFyqqa43trW5 X3s5Ar45CF13tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Since the loop is changed to while(0), some handling of sync_blocks in raid1_sync_request() is no longer needed and can be removed. No functional changes. 
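
For illustration, a minimal sketch of why this handling became dead once
the loop was reduced to a single pass (not part of the diff):

	sync_blocks = 0;
	do {
		if (sync_blocks == 0) {
			/* bitmap check: always reached, because the
			 * loop body only ever executes once */
		}
		/* build the bios ... */
		sync_blocks -= (len >> 9);	/* dead store: while (0) exits */
	} while (0);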
Signed-off-by: Li Nan --- drivers/md/raid1.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f01bab41da95..432ab96ec1cc 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2976,7 +2976,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, if (max_sector > sector_nr + good_sectors) max_sector =3D sector_nr + good_sectors; nr_sectors =3D 0; - sync_blocks =3D 0; do { struct folio *folio; int len =3D RESYNC_BLOCK_SIZE; @@ -2984,15 +2983,13 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, len =3D (max_sector - sector_nr) << 9; if (len =3D=3D 0) break; - if (sync_blocks =3D=3D 0) { - if (!md_bitmap_start_sync(mddev, sector_nr, - &sync_blocks, still_degraded) && - !conf->fullsync && - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) - break; - if ((len >> 9) > sync_blocks) - len =3D sync_blocks<<9; - } + if (!md_bitmap_start_sync(mddev, sector_nr, + &sync_blocks, still_degraded) && + !conf->fullsync && + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) + break; + if ((len >> 9) > sync_blocks) + len =3D sync_blocks<<9; =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { struct resync_folio *rf; @@ -3011,7 +3008,6 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, } nr_sectors +=3D len>>9; sector_nr +=3D len>>9; - sync_blocks -=3D (len>>9); } while (0); =20 r1_bio->sectors =3D nr_sectors; --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C129C328B4F; Wed, 17 Dec 2025 12:11:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973480; cv=none; b=t/WN0ktpaFvbm+mKnDCfMxovxDnko5LStMU0tKl43vq4LlPZCGDDI0Vrhsb6rs2fRnAL7OmUsCgALw8OBtg/og9BywVsm4MWMM37Y9kX5k/QSFW/VQo4TqoX03VBX4lFq0iutH1Rhjs+k4anSJvwvHvWhm0VzMq5ZrWRfbzoANo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973480; c=relaxed/simple; bh=wv4bnA6Uyw6eAJKHY+OKOkSYv6uFuYwxh6jRpUf/K8g=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=poXywVLOPSV6oHmqEv6+Ab+FFvUW2c/wsBjqvQRuLSSoSmmuWupuUxC6NElJLxIurWnUvE685bF47WjfRYreLcXOftYJqaXrWA3bnabsV5lfLa61ybUj/bKBiLNDaONL2sEbhZNzVkmanbfNJ6vPc8o2BvXpAz2Y8m4WPNqVgQU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh54w9ZzKHN4F; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id BBD0540570; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S13; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: 
linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 09/15] md/raid1: fix IO error at logical block size granularity Date: Wed, 17 Dec 2025 20:00:07 +0800 Message-Id: <20251217120013.2616531-10-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S13 X-Coremail-Antispam: 1UD129KBjvJXoW7ur15uFW3ZFy5Ar15GF4UArb_yoW8Zr1kpa 17J3yvvw4UGrWjyr4DAryqy3WFk34SkFWUGrs5G3y2gryDZ3sagFyUGayYgF10kr9ayayU Wwnrtr4rC3W7tF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVWUCVW8JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVWUJVW8JwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan RAID1 currently fixes IO error at PAGE_SIZE granularity. Fix at smaller granularity can handle more errors, and RAID will support logical block sizes larger than PAGE_SIZE in the future, where PAGE_SIZE IO will fail. Switch IO error fix granularity to logical block size. Signed-off-by: Li Nan --- drivers/md/raid1.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 432ab96ec1cc..c1580aea4189 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2114,7 +2114,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) { /* Try some synchronous reads of other devices to get * good data, much like with normal read errors. Only - * read into the pages we already have so we don't + * read into the block we already have so we don't * need to re-issue the read request. 
* We don't need to freeze the array, because being in an * active sync request, there is no normal IO, and @@ -2145,13 +2145,11 @@ static int fix_sync_read_error(struct r1bio *r1_bio) } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int d =3D r1_bio->read_disk; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; do { if (r1_bio->bios[d]->bi_end_io =3D=3D end_sync_read) { /* No rcu protection needed here devices @@ -2190,7 +2188,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (abort) return 0; =20 - /* Try next page */ + /* Try next block */ sectors -=3D s; sect +=3D s; off +=3D s << 9; @@ -2379,14 +2377,11 @@ static void fix_read_error(struct r1conf *conf, str= uct r1bio *r1_bio) } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int d =3D read_disk; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - do { rdev =3D conf->mirrors[d].rdev; if (rdev && --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 928C9342CAE; Wed, 17 Dec 2025 12:11:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; cv=none; b=aBwyX/nrmsRD0XAR51PL7KWeuYkPujqbOmxAW/15He1MGKtTw060zjZKFs6eywSnTayHDxoblyWRAl3klEK6S9IPq+wVXgSy7+f3bVDz5XWOBeZd1ONp632gsqr1Tkd5ioF7nweqOOjJK7YDJAxlZmVAIAVVUSjxhvN8fk/DgEA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; c=relaxed/simple; bh=dn1XmBjdmjh8jSKauWoqn5MQQDtsChqe41w5jmLrM28=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=imgdRtRR+PrS+bRMbzgA+bmzfqg8vb2wjuwqUlkoEqmB601OqA9frluvLHVLOSa1NYn4jyU6Y2kf1qZ02oLt+8FvVZBlJy4eMJzUUiolHc36C5sFp5UV0WH/6Am16rW11bHNSJfYG4DI2wKgorLLGKXsyTritRaXydK+B9pJgAM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=none smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=none smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.177]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh54z31zKHN4P; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id C45A140593; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S14; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 10/15] md/raid10: fix IO error at logical block size granularity Date: Wed, 17 Dec 2025 20:00:08 +0800 Message-Id: <20251217120013.2616531-11-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: 
<20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S14 X-Coremail-Antispam: 1UD129KBjvJXoW7Wry7KrWfWw4DGF43AryDtrb_yoW5Jr1xpa 9IkF13urWDGa1UZrnrAFWDX3WFk3y5tFWUtry8Gw4IgF9xtr98KF4UXFWYgry5CFW3Zw10 gw1DKr4xu3WkJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVW8JVWxJwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan RAID10 currently fixes IO error at PAGE_SIZE granularity. Fix at smaller granularity can handle more errors, and RAID will support logical block sizes larger than PAGE_SIZE in the future, where PAGE_SIZE IO will fail. Switch IO error fix granularity to logical block size. Signed-off-by: Li Nan --- drivers/md/raid10.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index a03afa9a6a5b..4beea6ee9dfc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2452,7 +2452,7 @@ static void sync_request_write(struct mddev *mddev, s= truct r10bio *r10_bio) static void fix_recovery_read_error(struct r10bio *r10_bio) { /* We got a read error during recovery. - * We repeat the read in smaller page-sized sections. + * We repeat the read in smaller logical_block_sized sections. * If a read succeeds, write it to the new device or record * a bad block if we cannot. 
* If a read fails, record a bad block on both old and @@ -2468,14 +2468,11 @@ static void fix_recovery_read_error(struct r10bio *= r10_bio) struct folio *folio =3D get_resync_folio(bio)->folio; =20 while (sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); struct md_rdev *rdev; sector_t addr; int ok; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - rdev =3D conf->mirrors[dr].rdev; addr =3D r10_bio->devs[0].addr + sect; ok =3D sync_folio_io(rdev, @@ -2619,14 +2616,11 @@ static void fix_read_error(struct r10conf *conf, st= ruct mddev *mddev, struct r10 } =20 while(sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int sl =3D slot; int success =3D 0; int start; =20 - if (s > (PAGE_SIZE>>9)) - s =3D PAGE_SIZE >> 9; - do { d =3D r10_bio->devs[sl].devnum; rdev =3D conf->mirrors[d].rdev; @@ -4925,16 +4919,14 @@ static int handle_reshape_read_error(struct mddev *= mddev, __raid10_find_phys(&conf->prev, r10b); =20 while (sectors) { - int s =3D sectors; + int s =3D min_t(int, sectors, mddev->logical_block_size >> 9); int success =3D 0; int first_slot =3D slot; =20 - if (s > (PAGE_SIZE >> 9)) - s =3D PAGE_SIZE >> 9; - while (!success) { int d =3D r10b->devs[slot].devnum; struct md_rdev *rdev =3D conf->mirrors[d].rdev; + if (rdev =3D=3D NULL || test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 9294C342CB0; Wed, 17 Dec 2025 12:11:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; cv=none; b=r6V8gBmO6jvNs9h0z3onJbC+oQQLor87OKyVnFjEKFQklqxOoNpFaXkmw95lD2n3dqwr/FiGODvj0z/okGHDwqf/w8ABdw2KjSpUqNKRza8S7CsGGpkIdb42aTpqrz/mRkywqXLwP2F3lCLEnzOajLxr82DGqAVZNkQ4vDJMBuQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; c=relaxed/simple; bh=MuYHDrnIIZGzUe6NBim+9ZcrPCYMcsiQkKM1rDmjZic=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=OBEoq/mRmWuAZNCx8KfXMTDRbt9ZgmQ6A/MW+4jHtWco0gpU9wkyrlRcqq/CReKYl9nRZOxnEnMIvO2DpVHkcXx+P0v51Uy0C5BROPY26xNFmb0Dhq9DUFpwWq5I+FKUbf+1/PAD3JKU/O6G+yzA3IsBLfwjNZeW85wFX3CbTzo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh55WlZzKHN4V; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id D114F40576; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S15; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, 
linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 11/15] md/raid1,raid10: clean up resync_fetch_folio Date: Wed, 17 Dec 2025 20:00:09 +0800 Message-Id: <20251217120013.2616531-12-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S15 X-Coremail-Antispam: 1UD129KBjvJXoW7Cw4UGw13ZFyfJrykKFyfWFg_yoW8Kr45pa 1jgry3Zw48Kay8Aw4DZF48Ca1Fka43trWjyFWxu3s3ZFy3XFyqgF4UXay8GFs8XF98Ka4F qa47tay5Wa1rAF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQ014x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Cr0_Gr1UMIIF0xvE42xK8VAvwI 8IcIk0rVWUJVWUCwCI42IY6I8E87Iv67AKxVW8JVWxJwCI42IY6I8E87Iv6xkF7I0E14v2 6r4j6r4UJbIYCTnIWIevJa73UjIFyTuYvjfUYVyIDUUUU X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan The helper resync_fetch_folio() only returns the folio member without any additional logic. Clean it up by accessing rf->folio directly. Signed-off-by: Li Nan --- drivers/md/raid1-10.c | 7 +------ drivers/md/raid1.c | 2 +- drivers/md/raid10.c | 3 +-- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 568ab002691f..2ff1f8855900 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -55,11 +55,6 @@ static inline void resync_free_folio(struct resync_folio= *rf) folio_put(rf->folio); } =20 -static inline struct folio *resync_fetch_folio(struct resync_folio *rf) -{ - return rf->folio; -} - /* * 'strct resync_folio' stores actual pages used for doing the resync * IO, and it is per-bio, so make .bi_private points to it. 
@@ -74,7 +69,7 @@ static void md_bio_reset_resync_folio(struct bio *bio, st= ruct resync_folio *rf, int size) { /* initialize bvec table again */ - if (WARN_ON(!bio_add_folio(bio, resync_fetch_folio(rf), + if (WARN_ON(!bio_add_folio(bio, rf->folio, min_t(int, size, RESYNC_BLOCK_SIZE), 0))) { bio->bi_status =3D BLK_STS_RESOURCE; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c1580aea4189..cf87f36fb7d8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2992,7 +2992,7 @@ static sector_t raid1_sync_request(struct mddev *mdde= v, sector_t sector_nr, bio =3D r1_bio->bios[i]; rf =3D get_resync_folio(bio); if (bio->bi_end_io) { - folio =3D resync_fetch_folio(rf); + folio =3D rf->folio; =20 /* * won't fail because the vec table is big diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4beea6ee9dfc..5afe270f6941 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3630,9 +3630,8 @@ static sector_t raid10_sync_request(struct mddev *mdd= ev, sector_t sector_nr, break; for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) { struct resync_folio *rf =3D get_resync_folio(bio); - struct folio *folio =3D resync_fetch_folio(rf); =20 - if (WARN_ON(!bio_add_folio(bio, folio, len, 0))) { + if (WARN_ON(!bio_add_folio(bio, rf->folio, len, 0))) { bio->bi_status =3D BLK_STS_RESOURCE; bio_endio(bio); *skipped =3D 1; --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CEE013431EC; Wed, 17 Dec 2025 12:11:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; cv=none; b=Jq8JqZjth6/croHLSEd+8yw/jcrWjOHx7KvULx7LCUgT4z8MebOtv274zVGmd932Vnw16MGWujT8yqWNYSFnYMLDTcWIlI6vQjOvJx1Dc8pxFHZ8zBJmlEa+GsNakDkgrjddwPtHLCpDYOEkQdMf8VT975e5kDAYEdmtGVEOKyQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; c=relaxed/simple; bh=8zC8ckEZPOSqxOznjVN8+F9Y212m42xN5I14oi6qhRg=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=YdKqX+sHIQv7RgQKUn2rF7S4kbBRxK8jNPPVMemfNaxfslP4HKfQzWBhOOIUr8SEA6MZzmGv06w2KhHH6r9G4s5+HWZBUtR9c6j7eFlJ33jTv+J+ByQFkHRJLEUvttEp+9SCFgwZTgYWBWHSF2WF0ws8FJbf3SPYk1bMHPrGOi4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh56Bb2zKHN4Z; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id E96B24056E; Wed, 17 Dec 2025 20:11:10 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S16; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: 
[PATCH 12/15] md: clean up resync_free_folio Date: Wed, 17 Dec 2025 20:00:10 +0800 Message-Id: <20251217120013.2616531-13-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S16 X-Coremail-Antispam: 1UD129KBjvJXoW7tFW5uFyxCrW3WF18KFykKrg_yoW8uF17pa n8Wr9Iva18GFW8AFs8ZF4UZFy5C3y7J3yUCFWxuws3ZFy3ZFyDWa1UJa4UKr4DXrn8Ga4I qFn8GrW3W3W5JF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan The resync_free_folio() helper only wraps a single folio_put() call, so remove it and call folio_put() directly. Signed-off-by: Li Nan --- drivers/md/raid1-10.c | 5 ----- drivers/md/raid1.c | 4 ++-- drivers/md/raid10.c | 4 ++-- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c index 2ff1f8855900..ffbd7bd0f6e8 100644 --- a/drivers/md/raid1-10.c +++ b/drivers/md/raid1-10.c @@ -50,11 +50,6 @@ static inline int resync_alloc_folio(struct resync_folio= *rf, return 0; } =20 -static inline void resync_free_folio(struct resync_folio *rf) -{ - folio_put(rf->folio); -} - /* * 'strct resync_folio' stores actual pages used for doing the resync * IO, and it is per-bio, so make .bi_private points to it. 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index cf87f36fb7d8..38f86de45dea 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -199,7 +199,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *d= ata) =20 out_free_folio: while (--j >=3D 0) - resync_free_folio(&rfs[j]); + folio_put(rfs[j].folio); =20 out_free_bio: while (++j < conf->raid_disks * 2) { @@ -222,7 +222,7 @@ static void r1buf_pool_free(void *__r1_bio, void *data) =20 for (i =3D conf->raid_disks * 2; i--; ) { rf =3D get_resync_folio(r1bio->bios[i]); - resync_free_folio(rf); + folio_put(rf->folio); bio_uninit(r1bio->bios[i]); kfree(r1bio->bios[i]); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5afe270f6941..c3ef2ea38b08 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -204,7 +204,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *= data) =20 out_free_pages: while (--j >=3D 0) - resync_free_folio(&rfs[j]); + folio_put(rfs[j].folio); =20 j =3D 0; out_free_bio: @@ -234,7 +234,7 @@ static void r10buf_pool_free(void *__r10_bio, void *dat= a) =20 if (bio) { rf =3D get_resync_folio(bio); - resync_free_folio(rf); + folio_put(rf->folio); bio_uninit(bio); kfree(bio); } --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CED2233E35D; Wed, 17 Dec 2025 12:11:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; cv=none; b=fh008bKMZjsB7OMAfBjYLljPntXsYuiomz4i6l4eHh7t2UQltsBaXl99ihoWvBi4XqFV/SZA/EphvESmMkM7s7+OZdE3jxl/ofO7+Xr0EXEoWQ7qIfaGWfTWzIDwRd7/xRmBap8w8nCXU9j+w+nPCjX4JwZdYUih0a5SrqOAWR0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; c=relaxed/simple; bh=2lovPEcp877LJDlQA4BGV3f9E2/j2yk4BPX0zKENcjg=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=SPK5Fq2HdHLy5+XvGtu6j8PGSZvRF3LhDt8P9iUU1yDiBQdlcgKLnLqKcK+ZY+1aaofNPEtc2xOE5qXS+I9mGQOq72Ol4w2l6EDYqB2Gru0yU1kkEQP6juhdaeq5AP1K9oVGm9xTnP+Xt7kEs6euP9JcbFI1L88SDWch7p93vy4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.170]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh56jxgzKHN4w; Wed, 17 Dec 2025 20:11:01 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 073054056F; Wed, 17 Dec 2025 20:11:11 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S17; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 13/15] md/raid1: clean up sync IO size calculation in raid1_sync_request Date: Wed, 17 Dec 2025 20:00:11 +0800 Message-Id: 
<20251217120013.2616531-14-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S17 X-Coremail-Antispam: 1UD129KBjvJXoW7Cw1DAry7WFWUKr1DWFWfZrb_yoW8AF15pw nxGr9Ig3yrGa13XwnxAa4UCF1FkFy3KrWUJFWSgwnxWF97CrnFka18XF1YgFyDZa43trZ8 X34kAr45A3WkJaUanT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Use 'nr_sectors' directly for sync IO size calculation. Prepare folio allocation failure fallback. No functional changes. 
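
For illustration, a minimal sketch of the equivalence (not part of the
diff; good_sectors starts at RESYNC_SECTORS and only shrinks, which is
what makes the old RESYNC_BLOCK_SIZE clamp redundant):

	/* old, computed in bytes */
	len = min_t(int, RESYNC_BLOCK_SIZE, (max_sector - sector_nr) << 9);
	/* new, the same quantity kept in sectors */
	nr_sectors = max_sector - sector_nr;	/* <= RESYNC_SECTORS */
	bio_add_folio_nofail(bio, folio, nr_sectors << 9, 0);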
Signed-off-by: Li Nan --- drivers/md/raid1.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 38f86de45dea..2be2277d4e7e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2970,21 +2970,19 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, max_sector =3D mddev->resync_max; /* Don't do IO beyond here */ if (max_sector > sector_nr + good_sectors) max_sector =3D sector_nr + good_sectors; - nr_sectors =3D 0; do { struct folio *folio; - int len =3D RESYNC_BLOCK_SIZE; - if (sector_nr + (len>>9) > max_sector) - len =3D (max_sector - sector_nr) << 9; - if (len =3D=3D 0) + + nr_sectors =3D max_sector - sector_nr; + if (nr_sectors =3D=3D 0) break; if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded) && !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) break; - if ((len >> 9) > sync_blocks) - len =3D sync_blocks<<9; + if (nr_sectors > sync_blocks) + nr_sectors =3D sync_blocks; =20 for (i =3D 0 ; i < conf->raid_disks * 2; i++) { struct resync_folio *rf; @@ -2998,11 +2996,10 @@ static sector_t raid1_sync_request(struct mddev *md= dev, sector_t sector_nr, * won't fail because the vec table is big * enough to hold all these pages */ - bio_add_folio_nofail(bio, folio, len, 0); + bio_add_folio_nofail(bio, folio, nr_sectors << 9, 0); } } - nr_sectors +=3D len>>9; - sector_nr +=3D len>>9; + sector_nr +=3D nr_sectors; } while (0); =20 r1_bio->sectors =3D nr_sectors; --=20 2.39.2 From nobody Mon Feb 9 04:27:43 2026 Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id DFB9E343201; Wed, 17 Dec 2025 12:11:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; cv=none; b=UXR0qPge5ZzE5zt4Z0F2eX9eBT6k5Fp5N/ZyGxDACwj9KUXxvf6/ImS+Z/CJqd6vMiQ3ZDvz400n/rAXCws3EM62s0mfBCoAzX+TYUmZ4vQ1rrgzmKAP9De5GBSXtsMcQ8Mf+xcoG6M6cEKYp2coW1lMjzT1jlBqMydEacAXVug= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1765973485; c=relaxed/simple; bh=pQBpAPQxfq/M/ibNQjzbJURavGAoUDSIa2Jdzm/a1Uk=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=rA9sCDmg5NpOKKzRDXYStfKwdYZmKIQ59lFVL9OYJWd+PW59A1/hyL9WTfphQ9RszcluU3tYCXu5Rca1TOJyICQ3kUTu7YaT2fhjjbgaz+zqUmKFiiEIDB9kGAumvq695NwDG2jM+JzbgWSf/TdEaHQStbOXfgQlWGZgeSlvbLo= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56 Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh60PMjzKHN4p; Wed, 17 Dec 2025 20:11:02 +0800 (CST) Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 1FBCB40576; Wed, 17 Dec 2025 20:11:11 +0800 (CST) Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S18; Wed, 17 Dec 2025 20:11:10 +0800 (CST) From: linan666@huaweicloud.com To: 
song@kernel.org, yukuai@fnnas.com Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org, xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com, yi.zhang@huawei.com Subject: [PATCH 14/15] md/raid10: clean up sync IO size calculation in raid10_sync_request Date: Wed, 17 Dec 2025 20:00:12 +0800 Message-Id: <20251217120013.2616531-15-linan666@huaweicloud.com> X-Mailer: git-send-email 2.39.2 In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com> References: <20251217120013.2616531-1-linan666@huaweicloud.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable X-CM-TRANSID: gCh0CgAXd_fdnUJp6F0JAg--.52527S18 X-Coremail-Antispam: 1UD129KBjvJXoW7Cw1DAry7WF4fCF47Jw18Xwb_yoW8Gr4fpF 4DGr97W3y8Ja18Zw45J3WUu3WFya93trWUAr4xW3Z3WF1fCr9Fka18J3WFgFyDXFy3GrWY qw18Ar45A3WkJF7anT9S1TB71UUUUU7qnTZGkaVYY2UrUUUUjbIjqfuFe4nvWSU5nxnvy2 9KBjDU0xBIdaVrnRJUUUQF14x267AKxVWrJVCq3wAFc2x0x2IEx4CE42xK8VAvwI8IcIk0 rVWrJVCq3wAFIxvE14AKwVWUJVWUGwA2048vs2IY020E87I2jVAFwI0_JF0E3s1l82xGYI kIc2x26xkF7I0E14v26ryj6s0DM28lY4IEw2IIxxk0rwA2F7IY1VAKz4vEj48ve4kI8wA2 z4x0Y4vE2Ix0cI8IcVAFwI0_tr0E3s1l84ACjcxK6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F 4UJwA2z4x0Y4vEx4A2jsIE14v26rxl6s0DM28EF7xvwVC2z280aVCY1x0267AKxVW0oVCq 3wAac4AC62xK8xCEY4vEwIxC4wAS0I0E0xvYzxvE52x082IY62kv0487Mc02F40EFcxC0V AKzVAqx4xG6I80ewAv7VC0I7IYx2IY67AKxVWUXVWUAwAv7VC2z280aVAFwI0_Gr1j6F4U JwAm72CE4IkC6x0Yz7v_Jr0_Gr1lF7xvr2IYc2Ij64vIr41lF7I21c0EjII2zVCS5cI20V AGYxC7M4kE6xkIj40Ew7xC0wCY1x0262kKe7AKxVWUAVWUtwCF04k20xvY0x0EwIxGrwCF x2IqxVCFs4IE7xkEbVWUJVW8JwC20s026c02F40E14v26r1j6r18MI8I3I0E7480Y4vE14 v26r106r1rMI8E67AF67kF1VAFwI0_Jw0_GFylIxkGc2Ij64vIr41lIxAIcVC0I7IYx2IY 67AKxVW8JVW5JwCI42IY6xIIjxv20xvEc7CjxVAFwI0_Gr1j6F4UJwCI42IY6xAIw20EY4 v20xvaj40_Jr0_JF4lIxAIcVC2z280aVAFwI0_Gr0_Cr1lIxAIcVC2z280aVCY1x0267AK xVW8Jr0_Cr1UYxBIdaVFxhVjvjDU0xZFpf9x0JUvhFsUUUUU= X-CM-SenderInfo: polqt0awwwqx5xdzvxpfor3voofrz/ Content-Type: text/plain; charset="utf-8" From: Li Nan Use 'nr_sectors' directly for sync IO size calculation. Prepare folio allocation failure fallback. No functional changes. 
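
For illustration, a minimal sketch of the unit discipline at this
boundary (not part of the diff): bio_add_folio() takes a length in
bytes, while raid10_sync_request() counts and returns 512-byte sectors.

	nr_sectors = max_sector - sector_nr;	/* sectors, <= max_sync */
	if (!bio_add_folio(bio, rf->folio, nr_sectors << 9, 0))	/* bytes */
		return nr_sectors;	/* sectors, matching the old max_sync */
	sector_nr += nr_sectors;	/* sectors */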
Signed-off-by: Li Nan
---
 drivers/md/raid10.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c3ef2ea38b08..f3e10e20ebb1 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3618,28 +3618,24 @@ static sector_t raid10_sync_request(struct mddev *m=
ddev, sector_t sector_nr,
 }
 }
=20
- nr_sectors =3D 0;
 if (sector_nr + max_sync < max_sector)
 max_sector =3D sector_nr + max_sync;
 do {
- int len =3D RESYNC_BLOCK_SIZE;
+ nr_sectors =3D max_sector - sector_nr;
=20
- if (sector_nr + (len>>9) > max_sector)
- len =3D (max_sector - sector_nr) << 9;
- if (len =3D=3D 0)
+ if (nr_sectors =3D=3D 0)
 break;
 for (bio=3D biolist ; bio ; bio=3Dbio->bi_next) {
 struct resync_folio *rf =3D get_resync_folio(bio);
=20
- if (WARN_ON(!bio_add_folio(bio, rf->folio, len, 0))) {
+ if (WARN_ON(!bio_add_folio(bio, rf->folio, nr_sectors << 9, 0))) {
 bio->bi_status =3D BLK_STS_RESOURCE;
 bio_endio(bio);
 *skipped =3D 1;
- return len >> 9;
+ return nr_sectors;
 }
 }
- nr_sectors +=3D len>>9;
- sector_nr +=3D len>>9;
+ sector_nr +=3D nr_sectors;
 } while (0);
 r10_bio->sectors =3D nr_sectors;
=20
--=20
2.39.2

From nobody Mon Feb 9 04:27:43 2026
Received: from dggsgout12.his.huawei.com (dggsgout12.his.huawei.com [45.249.212.56])
	(using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
	(No client certificate requested)
	by smtp.subspace.kernel.org (Postfix) with ESMTPS id DFCD8343204;
	Wed, 17 Dec 2025 12:11:22 +0000 (UTC)
Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=45.249.212.56
ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116;
	t=1765973485; cv=none;
	b=MoeEcy/KwNN0K63B0dWUepUEy9e7HQCRfhuRb8soDOUDZiem4xvVK7TdX7os2bfCUDgmRuy7t5NKaDCeEb1woPpYMxQv6sZJTU0oL08Qj8NY6pNaqFNFPNm2t16F+G8D0MTc5F9lwQooZjsVlg/Hm1Ch67K+folMU9XKpBJw8nQ=
ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org;
	s=arc-20240116; t=1765973485; c=relaxed/simple;
	bh=2Ab+QkKnuMIQmLYghY2bwomLU3UwvZ3X9rWU+Rsk0E0=;
	h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References:
	 MIME-Version;
	b=nSmhN/m8xGAP3c5HBXXzr7fJTQXUJ/ehwGqgAe9Ofep8YE177nUHtwtI4Z+zT3dDYEvU0680sKfYmDiYtdUTzxRY05ujgLRNycNZsXtnHuM+g09R/yQ+eT6m9uCRx9x2WFrg8eQIVvYFiTvdwC9PF+lTy6YKm/CcfYB3Mf8imF0=
ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com; spf=pass smtp.mailfrom=huaweicloud.com; arc=none smtp.client-ip=45.249.212.56
Authentication-Results: smtp.subspace.kernel.org; dmarc=none (p=none dis=none) header.from=huaweicloud.com
Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=huaweicloud.com
Received: from mail.maildlp.com (unknown [172.19.163.198]) by dggsgout12.his.huawei.com (SkyGuard) with ESMTPS id 4dWXh60pdYzKHN4w; Wed, 17 Dec 2025 20:11:02 +0800 (CST)
Received: from mail02.huawei.com (unknown [10.116.40.128]) by mail.maildlp.com (Postfix) with ESMTP id 32D5E40577; Wed, 17 Dec 2025 20:11:11 +0800 (CST)
Received: from huaweicloud.com (unknown [10.50.87.129]) by APP4 (Coremail) with SMTP id gCh0CgAXd_fdnUJp6F0JAg--.52527S19; Wed, 17 Dec 2025 20:11:11 +0800 (CST)
From: linan666@huaweicloud.com
To: song@kernel.org, yukuai@fnnas.com
Cc: linux-raid@vger.kernel.org, linux-kernel@vger.kernel.org,
	xni@redhat.com, linan666@huaweicloud.com, yangerkun@huawei.com,
	yi.zhang@huawei.com
Subject: [PATCH 15/15] md/raid1,raid10: fall back to smaller order if sync
 folio alloc fails
Date: Wed, 17 Dec 2025 20:00:13 +0800
Message-Id:
Message-Id: <20251217120013.2616531-16-linan666@huaweicloud.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To: <20251217120013.2616531-1-linan666@huaweicloud.com>
References: <20251217120013.2616531-1-linan666@huaweicloud.com>
Precedence: bulk
X-Mailing-List: linux-kernel@vger.kernel.org
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

From: Li Nan

A RESYNC_BLOCK_SIZE (64K) folio allocation is more likely to fail than a
4K one. Retry with progressively lower orders to improve allocation
reliability.

The rf->folio entries of one r1/10_bio may then end up with different
orders. Since the shared 'order' only ever decreases, its final value is
the minimum across the bio's folios; derive r1/10_bio->sectors from that
minimum so the IO size cannot exceed the smallest folio when the folios
are added to bios later.
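For intuition, a minimal userspace sketch of the fallback loop (malloc
stands in for folio_alloc; the names are invented for this example and
are not kernel API). Passing 'order' by pointer means every later
allocation for the same r1/10_bio starts at the already-lowered order,
which is why the final 'order' is the minimum:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/*
 * Try the requested order first, then retry one order lower at a
 * time, mirroring the loop in resync_alloc_folio(); retries stop
 * once the decremented order reaches 0.
 */
static void *alloc_with_fallback(int *order)
{
	void *buf;

	do {
		buf = malloc(PAGE_SIZE << *order);
		if (buf)
			return buf;
	} while (--(*order) > 0);

	return NULL;
}

int main(void)
{
	int order = 4;	/* 64K = 4K << 4, as get_order(RESYNC_BLOCK_SIZE) on 4K pages */
	void *buf = alloc_with_fallback(&order);

	if (!buf)
		return 1;
	printf("got %lu bytes at order %d\n", PAGE_SIZE << order, order);
	free(buf);
	return 0;
}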
Signed-off-by: Li Nan
---
 drivers/md/raid1-10.c | 14 +++++++++++---
 drivers/md/raid1.c    | 13 +++++++++----
 drivers/md/raid10.c   | 28 ++++++++++++++++++++++++++--
 3 files changed, 46 insertions(+), 9 deletions(-)

diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index ffbd7bd0f6e8..e966d11a81e7 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -41,12 +41,20 @@ static void rbio_pool_free(void *rbio, void *data)
 }
 
 static inline int resync_alloc_folio(struct resync_folio *rf,
-				     gfp_t gfp_flags)
+				     gfp_t gfp_flags, int *order)
 {
-	rf->folio = folio_alloc(gfp_flags, get_order(RESYNC_BLOCK_SIZE));
-	if (!rf->folio)
+	struct folio *folio;
+
+	do {
+		folio = folio_alloc(gfp_flags, *order);
+		if (folio)
+			break;
+	} while (--(*order) > 0);
+
+	if (!folio)
 		return -ENOMEM;
 
+	rf->folio = folio;
 	return 0;
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2be2277d4e7e..a9af40cda7dd 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -149,6 +149,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int need_folio;
 	int j;
 	struct resync_folio *rfs;
+	int order = get_order(RESYNC_BLOCK_SIZE);
 
 	r1_bio = r1bio_pool_alloc(gfp_flags, conf);
 	if (!r1_bio)
@@ -182,7 +183,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		struct resync_folio *rf = &rfs[j];
 
 		if (j < need_folio) {
-			if (resync_alloc_folio(rf, gfp_flags))
+			if (resync_alloc_folio(rf, gfp_flags, &order))
 				goto out_free_folio;
 		} else {
 			memcpy(rf, &rfs[0], sizeof(*rf));
@@ -193,6 +194,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		r1_bio->bios[j]->bi_private = rf;
 	}
 
+	r1_bio->sectors = 1 << (order + PAGE_SECTORS_SHIFT);
 	r1_bio->master_bio = NULL;
 
 	return r1_bio;
@@ -2767,7 +2769,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int write_targets = 0, read_targets = 0;
 	sector_t sync_blocks;
 	bool still_degraded = false;
-	int good_sectors = RESYNC_SECTORS;
+	int good_sectors;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
 
@@ -2849,8 +2851,11 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	r1_bio->sector = sector_nr;
 	r1_bio->state = 0;
 	set_bit(R1BIO_IsSync, &r1_bio->state);
-	/* make sure good_sectors won't go across barrier unit boundary */
-	good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
+	/*
+	 * Make sure good_sectors won't go across barrier unit boundary;
+	 * note r1_bio->sectors <= RESYNC_SECTORS.
+	 */
+	good_sectors = align_to_barrier_unit_end(sector_nr, r1_bio->sectors);
 
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct md_rdev *rdev;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f3e10e20ebb1..f0e91090097a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -135,6 +135,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int j;
 	int nalloc, nalloc_rf;
 	struct resync_folio *rfs;
+	int order = get_order(RESYNC_BLOCK_SIZE);
 
 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
 	if (!r10_bio)
@@ -185,7 +186,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 
 		if (!j || test_bit(MD_RECOVERY_SYNC,
 				   &conf->mddev->recovery)) {
-			if (resync_alloc_folio(rf, gfp_flags))
+			if (resync_alloc_folio(rf, gfp_flags, &order))
 				goto out_free_pages;
 		} else {
 			memcpy(rf, &rfs[0], sizeof(*rf));
@@ -200,6 +201,7 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 		}
 	}
 
+	r10_bio->sectors = 1 << (order + PAGE_SECTORS_SHIFT);
 	return r10_bio;
 
 out_free_pages:
@@ -3374,6 +3376,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 					continue;
 				}
 			}
+
+			/*
+			 * The RESYNC_BLOCK_SIZE folio allocation might have
+			 * failed in resync_alloc_folio(); fall back to a
+			 * smaller sync size if needed.
+			 */
+			if (max_sync > r10_bio->sectors)
+				max_sync = r10_bio->sectors;
+
 			any_working = 1;
 			bio = r10_bio->devs[0].bio;
 			bio->bi_next = biolist;
@@ -3525,7 +3536,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		if (sync_blocks < max_sync)
 			max_sync = sync_blocks;
+
 		r10_bio = raid10_alloc_init_r10buf(conf);
+		/*
+		 * The RESYNC_BLOCK_SIZE folio allocation might have failed in
+		 * resync_alloc_folio(); fall back to a smaller sync size if needed.
+		 */
+		if (max_sync > r10_bio->sectors)
+			max_sync = r10_bio->sectors;
+
 		r10_bio->state = 0;
 
 		r10_bio->mddev = mddev;
@@ -4702,7 +4721,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 	r10_bio->mddev = mddev;
 	r10_bio->sector = sector_nr;
 	set_bit(R10BIO_IsReshape, &r10_bio->state);
-	r10_bio->sectors = last - sector_nr + 1;
+	/*
+	 * The RESYNC_BLOCK_SIZE folio allocation might have
+	 * failed in resync_alloc_folio(); fall back to a
+	 * smaller sync size if needed.
+	 */
+	r10_bio->sectors = min_t(int, r10_bio->sectors, last - sector_nr + 1);
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
 
-- 
2.39.2
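As a quick sanity check on the r1/10_bio->sectors arithmetic introduced
above, a standalone sketch (plain userspace C, assuming 4K pages; in the
kernel, PAGE_SECTORS_SHIFT is PAGE_SHIFT - SECTOR_SHIFT):

#include <assert.h>

#define SECTOR_SHIFT		9
#define PAGE_SHIFT_ASSUMED	12	/* assumes 4K pages */
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT_ASSUMED - SECTOR_SHIFT)

int main(void)
{
	/* order 4: 64K folio -> 128 sectors, i.e. RESYNC_SECTORS */
	assert((1 << (4 + PAGE_SECTORS_SHIFT)) == 128);
	/* order 0: one 4K page -> 8 sectors */
	assert((1 << (0 + PAGE_SECTORS_SHIFT)) == 8);
	return 0;
}

Whatever order the allocator ends up with, this formula converts it to
the matching sector count for the sync IO.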