From: Zhang Yi <yi.zhang@huawei.com>
The parameter max_hw_wzeroes_unmap_sectors in queue_limits should be
equal to max_write_zeroes_sectors if it is set to a non-zero value.
However, the stacked md drivers call md_init_stacking_limits() to
initialize this parameter to UINT_MAX but only adjust
max_write_zeroes_sectors when setting limits. Therefore, this
discrepancy triggers a value check failure in blk_validate_limits().
Fix this failure by explicitly setting max_hw_wzeroes_unmap_sectors to
zero.
Fixes: 0c40d7cb5ef3 ("block: introduce max_{hw|user}_wzeroes_unmap_sectors to queue limits")
Reported-by: John Garry <john.g.garry@oracle.com>
Closes: https://lore.kernel.org/linux-block/803a2183-a0bb-4b7a-92f1-afc5097630d2@oracle.com/
Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
---
drivers/md/md-linear.c | 1 +
drivers/md/raid0.c | 1 +
drivers/md/raid1.c | 1 +
drivers/md/raid10.c | 1 +
drivers/md/raid5.c | 1 +
5 files changed, 5 insertions(+)

diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5d9b08115375..3e1f165c2d20 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f1d8811a542a..419139ad7663 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 408c26398321..35c6498b4917 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)

md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b60c30bfb6c7..9832eefb2f15 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4008,6 +4008,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)

md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 023649fe2476..e385ef1355e8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,6 +7732,7 @@ static int raid5_set_limits(struct mddev *mddev)
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
--
2.46.1
On 25/08/2025 09:33, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
>
> The parameter max_hw_wzeroes_unmap_sectors in queue_limits should be
> equal to max_write_zeroes_sectors if it is set to a non-zero value.
> However, the stacked md drivers call md_init_stacking_limits() to
> initialize this parameter to UINT_MAX but only adjust
> max_write_zeroes_sectors when setting limits. Therefore, this
> discrepancy triggers a value check failure in blk_validate_limits().
>
> Fix this failure by explicitly setting max_hw_wzeroes_unmap_sectors to
> zero.
>
> Fixes: 0c40d7cb5ef3 ("block: introduce max_{hw|user}_wzeroes_unmap_sectors to queue limits")
> Reported-by: John Garry <john.g.garry@oracle.com>
> Closes: https://lore.kernel.org/linux-block/803a2183-a0bb-4b7a-92f1-afc5097630d2@oracle.com/
> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>

Tested-by: John Garry <john.g.garry@oracle.com> # raid 0/1/10

> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
> index 408c26398321..35c6498b4917 100644
> --- a/drivers/md/raid1.c
> +++ b/drivers/md/raid1.c
> @@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
>
>  	md_init_stacking_limits(&lim);
>  	lim.max_write_zeroes_sectors = 0;
> +	lim.max_hw_wzeroes_unmap_sectors = 0;

It would be better if we documented why we cannot support this on
raid1/10, yet we can on raid0.

I am looking through the history of why max_write_zeroes_sectors is set
to zero. I have gone as far back as 5026d7a9b, and this tells us that
the retry mechanism for WRITE SAME causes an issue where mirrors are
offlined (and so we disabled the support); and this was simply copied
for write zeroes in 3deff1a70.
On 9/2/2025 8:25 PM, John Garry wrote:
> On 25/08/2025 09:33, Zhang Yi wrote:
>> [...]
>
> Tested-by: John Garry <john.g.garry@oracle.com> # raid 0/1/10

Thank you for the test!

>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>> index 408c26398321..35c6498b4917 100644
>> --- a/drivers/md/raid1.c
>> +++ b/drivers/md/raid1.c
>> @@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
>>
>>  	md_init_stacking_limits(&lim);
>>  	lim.max_write_zeroes_sectors = 0;
>> +	lim.max_hw_wzeroes_unmap_sectors = 0;
>
> It would be better if we documented why we cannot support this on
> raid1/10, yet we can on raid0.
>
> I am looking through the history of why max_write_zeroes_sectors is set
> to zero. I have gone as far back as 5026d7a9b, and this tells us that
> the retry mechanism for WRITE SAME causes an issue where mirrors are
> offlined (and so we disabled the support); and this was simply copied
> for write zeroes in 3deff1a70.

Yes, as discussed with Kuai, it's better to add TODO comments for
RAID 1, 10, and 5 for now; we can support them later by properly
propagating unsupported errors to the upper layers. I can send out a
separate patch to add this comment.
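Roughly like the following for raid1 (just a sketch of the idea, not
the final wording of that separate patch):

	/*
	 * TODO: raid1 cannot yet handle a Write Zeroes failure on one
	 * mirror without offlining it (see the WRITE SAME history in
	 * 5026d7a9b and its copy for write zeroes in 3deff1a70), so
	 * the operation is disabled entirely. Supporting it requires
	 * propagating the unsupported error to the upper layers
	 * instead.
	 */
	lim.max_write_zeroes_sectors = 0;
	lim.max_hw_wzeroes_unmap_sectors = 0;

Thanks,
Yi.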
On 10/09/2025 09:55, Zhang Yi wrote:
>> It would be better if we documented why we cannot support this on
>> raid1/10, yet we can on raid0.
>>
>> I am looking through the history of why max_write_zeroes_sectors is
>> set to zero. I have gone as far back as 5026d7a9b, and this tells us
>> that the retry mechanism for WRITE SAME causes an issue where mirrors
>> are offlined (and so we disabled the support); and this was simply
>> copied for write zeroes in 3deff1a70.
>
> Yes, as discussed with Kuai, it's better to add TODO comments for
> RAID 1, 10, and 5 for now; we can support them later by properly
> propagating unsupported errors to the upper layers. I can send out
> a separate patch to add this comment.

Sure, adding a comment would be good, detailing the technical challenge
in supporting it. For now, this series should go in ASAP.
Hi,

On 2025/09/02 20:25, John Garry wrote:
>> diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
>> index 408c26398321..35c6498b4917 100644
>> --- a/drivers/md/raid1.c
>> +++ b/drivers/md/raid1.c
>> @@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
>>
>>  	md_init_stacking_limits(&lim);
>>  	lim.max_write_zeroes_sectors = 0;
>> +	lim.max_hw_wzeroes_unmap_sectors = 0;
>
> It would be better if we documented why we cannot support this on
> raid1/10, yet we can on raid0.
>
> I am looking through the history of why max_write_zeroes_sectors is set
> to zero. I have gone as far back as 5026d7a9b, and this tells us that
> the retry mechanism for WRITE SAME causes an issue where mirrors are
> offlined (and so we disabled the support); and this was simply copied
> for write zeroes in 3deff1a70.

Yes, we don't support it for now, and I think it is not too hard to
support write zeroes, and finally to support unmap write zeroes. BTW,
raid5 discard is in the same situation.

However, I feel this is not related to this set; perhaps a separate
patch to add comments, which I can accept.

Thanks,
Kuai
Dear Yi,


Thank you for your patch.

On 25.08.25 at 10:33, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
>
> The parameter max_hw_wzeroes_unmap_sectors in queue_limits should be
> equal to max_write_zeroes_sectors if it is set to a non-zero value.

Excuse my ignorance, but why?

> However, the stacked md drivers call md_init_stacking_limits() to
> initialize this parameter to UINT_MAX but only adjust
> max_write_zeroes_sectors when setting limits. Therefore, this
> discrepancy triggers a value check failure in blk_validate_limits().
>
> Fix this failure by explicitly setting max_hw_wzeroes_unmap_sectors to
> zero.

In `linear_set_limits()` and `raid0_set_limits()` you set it to
`mddev->chunk_sectors`. Is that intentional?

> Fixes: 0c40d7cb5ef3 ("block: introduce max_{hw|user}_wzeroes_unmap_sectors to queue limits")
> Reported-by: John Garry <john.g.garry@oracle.com>
> Closes: https://lore.kernel.org/linux-block/803a2183-a0bb-4b7a-92f1-afc5097630d2@oracle.com/

It’d be great if you added the test case to the commit message.

> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
> ---
> [...]
Hi, Paul!

On 8/25/2025 4:59 PM, Paul Menzel wrote:
> Dear Yi,
>
> Thank you for your patch.
>
> On 25.08.25 at 10:33, Zhang Yi wrote:
>> From: Zhang Yi <yi.zhang@huawei.com>
>>
>> The parameter max_hw_wzeroes_unmap_sectors in queue_limits should be
>> equal to max_write_zeroes_sectors if it is set to a non-zero value.
>
> Excuse my ignorance, but why?

Currently, the max_hw_wzeroes_unmap_sectors parameter is used only to
determine whether the backend device supports the "unmap write zeroes"
operation; a non-zero value indicates that the device supports it. That
in turn depends on the device supporting the write zeroes command,
which means max_write_zeroes_sectors must not be zero. However, since
we do not use the specific value, max_hw_wzeroes_unmap_sectors can only
take one of two values: max_write_zeroes_sectors or 0; any other value
is meaningless.

>> However, the stacked md drivers call md_init_stacking_limits() to
>> initialize this parameter to UINT_MAX but only adjust
>> max_write_zeroes_sectors when setting limits. Therefore, this
>> discrepancy triggers a value check failure in blk_validate_limits().
>>
>> Fix this failure by explicitly setting max_hw_wzeroes_unmap_sectors to
>> zero.
>
> In `linear_set_limits()` and `raid0_set_limits()` you set it to
> `mddev->chunk_sectors`. Is that intentional?

Yes. The linear and raid0 drivers can support the unmap write zeroes
operation if all of the backend devices support it, so we can
initialize it to chunk_sectors (the same as max_write_zeroes_sectors).
The raid1/10/5 drivers don't support write zeroes at all, so we have to
set it to zero.

>> Fixes: 0c40d7cb5ef3 ("block: introduce max_{hw|user}_wzeroes_unmap_sectors to queue limits")
>> Reported-by: John Garry <john.g.garry@oracle.com>
>> Closes: https://lore.kernel.org/linux-block/803a2183-a0bb-4b7a-92f1-afc5097630d2@oracle.com/
>
> It’d be great if you added the test case to the commit message.

Yeah, I will add a test to blktests.
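By the way, the check this patch avoids tripping is essentially the
following (a paraphrase of the rule described above, not necessarily
the exact code in blk_validate_limits()):

	/* a non-zero hw unmap-write-zeroes limit must match the write
	 * zeroes limit, otherwise the limits are rejected */
	if (lim->max_hw_wzeroes_unmap_sectors &&
	    lim->max_hw_wzeroes_unmap_sectors != lim->max_write_zeroes_sectors)
		return -EINVAL;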
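And for linear/raid0, stacking takes care of members that lack support:
the limit is reduced to the minimum across all member disks, so a
single member reporting 0 disables unmap write zeroes for the whole
array. Schematically (illustrative only; member_wzeroes_unmap_limit()
is a made-up helper standing in for the member disk's queue limit):

	unsigned int limit = mddev->chunk_sectors;

	/* one member reporting 0 zeroes the limit for the whole array */
	rdev_for_each(rdev, mddev)
		limit = min(limit, member_wzeroes_unmap_limit(rdev));

Thanks,
Yi.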
On 2025/8/25 16:33, Zhang Yi wrote:
> From: Zhang Yi <yi.zhang@huawei.com>
>
> The parameter max_hw_wzeroes_unmap_sectors in queue_limits should be
> equal to max_write_zeroes_sectors if it is set to a non-zero value.
>
> [...]
>
> Signed-off-by: Zhang Yi <yi.zhang@huawei.com>

LGTM, feel free to add

Reviewed-by: Li Nan <linan122@huawei.com>

--
Thanks,
Nan