Open code set_io_stripe() for RAID56, as it a) uses a different method to
calculate the stripe_index and b) doesn't need to go through raid-stripe-tree
mapping code.
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
fs/btrfs/volumes.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 946333c8c331..7df991a81c4b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6670,13 +6670,16 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
btrfs_stripe_nr_to_offset(io_geom.stripe_nr *
nr_data_stripes(map));
for (int i = 0; i < io_geom.num_stripes; i++) {
- ret = set_io_stripe(fs_info, op, logical, length,
- &bioc->stripes[i], map,
- (i + io_geom.stripe_nr) % io_geom.num_stripes,
- io_geom.stripe_offset,
- io_geom.stripe_nr);
- if (ret < 0)
- break;
+ struct btrfs_io_stripe *dst = &bioc->stripes[i];
+ u32 stripe_index;
+
+ stripe_index =
+ (i + io_geom.stripe_nr) % io_geom.num_stripes;
+ dst->dev = map->stripes[stripe_index].dev;
+ dst->physical =
+ map->stripes[stripe_index].physical +
+ io_geom.stripe_offset +
+ btrfs_stripe_nr_to_offset(io_geom.stripe_nr);
}
} else {
/*
--
2.43.0
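For readers following along, the open-coded loop above boils down to a rotated
stripe lookup plus an offset calculation. The standalone userspace sketch below
mirrors that arithmetic; the 64K stripe length, the simplified struct and the
sample values are assumptions for illustration, not the kernel definitions.

/*
 * Userspace sketch of the RAID56 stripe placement performed by the
 * open-coded loop above.  The 64K stripe length and the struct layout
 * are simplified assumptions, not the kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_STRIPE_LEN_SHIFT 16	/* assumed 64K stripe length */

struct demo_stripe {
	int devid;		/* stand-in for the btrfs_device pointer */
	uint64_t physical;	/* start of this device's slice of the chunk */
};

/* mirrors btrfs_stripe_nr_to_offset(): stripe number -> byte offset */
static uint64_t demo_stripe_nr_to_offset(uint32_t stripe_nr)
{
	return (uint64_t)stripe_nr << DEMO_STRIPE_LEN_SHIFT;
}

int main(void)
{
	struct demo_stripe map[3] = {
		{ 1, 1ULL << 30 }, { 2, 1ULL << 30 }, { 3, 1ULL << 30 },
	};
	uint32_t num_stripes = 3;	/* data + parity stripes */
	uint32_t stripe_nr = 5;		/* stripe number within the chunk */
	uint64_t stripe_offset = 0x1000;	/* offset into that stripe */

	for (uint32_t i = 0; i < num_stripes; i++) {
		/* rotated index, as in (i + io_geom.stripe_nr) % num_stripes */
		uint32_t stripe_index = (i + stripe_nr) % num_stripes;
		uint64_t physical = map[stripe_index].physical +
				    stripe_offset +
				    demo_stripe_nr_to_offset(stripe_nr);

		printf("bioc stripe %u -> dev %d, physical 0x%llx\n",
		       i, map[stripe_index].devid,
		       (unsigned long long)physical);
	}
	return 0;
}

It prints one line per bioc stripe, matching the dst->dev and dst->physical
assignments in the hunk above.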
On Tue, Dec 12, 2023 at 04:38:09AM -0800, Johannes Thumshirn wrote:
> Open code set_io_stripe() for RAID56, as it a) uses a different method to
> calculate the stripe_index and b) doesn't need to go through raid-stripe-tree
> mapping code.
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
I think raid stripe tree handling also really should move out of
set_io_stripe. Below is the latest I have, although it probably won't
apply to your tree:
---
From ac208da48d7f9d11eef8a01ac0c6fbf9681665b5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 22 Jun 2023 05:53:13 +0200
Subject: btrfs: move raid-stripe-tree handling out of set_io_stripe
set_io_stripe gets a little too complicated with the raid-stripe-tree
handling. Move it out into the only caller that actually needs it.
The only reads with more than a single stripe are for the parity raid
recovery case, which will need very special handling anyway once implemented.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
fs/btrfs/volumes.c | 61 ++++++++++++++++++++--------------------------
1 file changed, 27 insertions(+), 34 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 30ee5d1670d034..e32eefa242b0a4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6233,22 +6233,12 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
return U64_MAX;
}
-static int set_io_stripe(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
- u64 logical, u64 *length, struct btrfs_io_stripe *dst,
- struct map_lookup *map, u32 stripe_index,
- u64 stripe_offset, u64 stripe_nr)
+static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
+ u32 stripe_index, u64 stripe_offset, u32 stripe_nr)
{
dst->dev = map->stripes[stripe_index].dev;
-
- if (op == BTRFS_MAP_READ &&
- btrfs_use_raid_stripe_tree(fs_info, map->type))
- return btrfs_get_raid_extent_offset(fs_info, logical, length,
- map->type, stripe_index,
- dst);
-
dst->physical = map->stripes[stripe_index].physical +
stripe_offset + ((u64)stripe_nr << BTRFS_STRIPE_LEN_SHIFT);
- return 0;
}
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
@@ -6423,15 +6413,24 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* physical block information on the stack instead of allocating an
* I/O context structure.
*/
- if (smap && num_alloc_stripes == 1 &&
- !(btrfs_use_raid_stripe_tree(fs_info, map->type) &&
- op != BTRFS_MAP_READ) &&
- !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) {
- ret = set_io_stripe(fs_info, op, logical, length, smap, map,
- stripe_index, stripe_offset, stripe_nr);
- *mirror_num_ret = mirror_num;
- *bioc_ret = NULL;
- goto out;
+ if (smap && num_alloc_stripes == 1) {
+ if (op == BTRFS_MAP_READ &&
+ btrfs_use_raid_stripe_tree(fs_info, map->type)) {
+ ret = btrfs_get_raid_extent_offset(fs_info, logical,
+ length, map->type,
+ stripe_index, smap);
+ *mirror_num_ret = mirror_num;
+ *bioc_ret = NULL;
+ goto out;
+ } else if (!(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
+ mirror_num == 0) {
+ set_io_stripe(smap, map, stripe_index, stripe_offset,
+ stripe_nr);
+ *mirror_num_ret = mirror_num;
+ *bioc_ret = NULL;
+ ret = 0;
+ goto out;
+ }
}
bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
@@ -6448,6 +6447,8 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
*
* It's still mostly the same as other profiles, just with extra rotation.
*/
+ ASSERT(op != BTRFS_MAP_READ ||
+ btrfs_use_raid_stripe_tree(fs_info, map->type));
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
(op != BTRFS_MAP_READ || mirror_num > 1)) {
/*
@@ -6461,29 +6462,21 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
bioc->full_stripe_logical = em->start +
((stripe_nr * data_stripes) << BTRFS_STRIPE_LEN_SHIFT);
for (i = 0; i < num_stripes; i++)
- ret = set_io_stripe(fs_info, op, logical, length,
- &bioc->stripes[i], map,
- (i + stripe_nr) % num_stripes,
- stripe_offset, stripe_nr);
+ set_io_stripe(&bioc->stripes[i], map,
+ (i + stripe_nr) % num_stripes,
+ stripe_offset, stripe_nr);
} else {
/*
* For all other non-RAID56 profiles, just copy the target
* stripe into the bioc.
*/
for (i = 0; i < num_stripes; i++) {
- ret = set_io_stripe(fs_info, op, logical, length,
- &bioc->stripes[i], map, stripe_index,
- stripe_offset, stripe_nr);
+ set_io_stripe(&bioc->stripes[i], map, stripe_index,
+ stripe_offset, stripe_nr);
stripe_index++;
}
}
- if (ret) {
- *bioc_ret = NULL;
- btrfs_put_bioc(bioc);
- goto out;
- }
-
if (op != BTRFS_MAP_READ)
max_errors = btrfs_chunk_max_errors(map);
--
2.39.2
On 13.12.23 09:58, Christoph Hellwig wrote:
>
> I think raid stripe tree handling also really should move out of
> set_io_stripe. Below is the latest I have, although it probably won't
> apply to your tree:
I've decided to add that one afterwards, giving attribution to you.
There are some other patches in your tree as well that I want to have
a look at.
On 13.12.23 09:58, Christoph Hellwig wrote:
> On Tue, Dec 12, 2023 at 04:38:09AM -0800, Johannes Thumshirn wrote:
>> Open code set_io_stripe() for RAID56, as it a) uses a different method to
>> calculate the stripe_index and b) doesn't need to go through raid-stripe-tree
>> mapping code.
>
> Looks good:
>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
>
> I think raid stripe tree handling also really should move out of
> set_io_stripe. Below is the latest I have, although it probably won't
> apply to your tree:

That would work as well and replace patch 1 then. Let me think about it.
On Wed, Dec 13, 2023 at 09:09:47AM +0000, Johannes Thumshirn wrote:
> > I think raid stripe tree handling also really should move out of
> > set_io_stripe. Below is the latest I have, although it probably won't
> > apply to your tree:
>
> That would work as well and replace patch 1 then. Let me think about it.

I actually really like splitting that check into a helper for
documentation purposes. Btw, this is my full tree that the patch is from
in case it is useful:

http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/raid-stripe-tree-cleanups
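For illustration, splitting that check out could look roughly like the helper
below. The name and the exact shape are only a sketch derived from the
fast-path condition removed in the patch above, not code taken from any tree.

/*
 * Illustrative only: a named helper for the "can we use the on-stack
 * smap fast path?" check in btrfs_map_block().  Name and placement are
 * hypothetical.
 */
static bool is_single_device_io(struct btrfs_fs_info *fs_info,
				const struct btrfs_io_stripe *smap,
				const struct map_lookup *map,
				int num_alloc_stripes,
				enum btrfs_map_op op, int mirror_num)
{
	if (!smap || num_alloc_stripes != 1)
		return false;

	/* Writes through the raid-stripe-tree need a full io_context. */
	if (btrfs_use_raid_stripe_tree(fs_info, map->type) &&
	    op != BTRFS_MAP_READ)
		return false;

	/* RAID56 rebuild reads need all stripes of the full stripe. */
	if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)
		return false;

	return true;
}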
On 13.12.23 10:17, hch@infradead.org wrote:
> On Wed, Dec 13, 2023 at 09:09:47AM +0000, Johannes Thumshirn wrote:
>>> I think raid stripe tree handling also really should move out of
>>> set_io_stripe. Below is the latest I have, although it probably won't
>>> apply to your tree:
>>
>> That would work as well and replace patch 1 then. Let me think about it.
>
> I actually really like splitting that check into a helper for
> documentation purposes. Btw, this is my full tree that the patch is from
> in case it is useful:
>
> http://git.infradead.org/users/hch/misc.git/shortlog/refs/heads/raid-stripe-tree-cleanups

Cool thanks, I'll have a look :)