From: Jason Gunthorpe <jgg@nvidia.com>
C_BAD_STE was observed when updating a nested STE from S1-bypass mode to
S1DSS-bypass mode. As both modes enable S2, the used bits differ slightly
from those of the normal S1-bypass and S1DSS-bypass modes. As a result,
fields like MEV and EATS in S2's used list marked word1 as a critical
word requiring STE.V=0, which breaks a hitless update.

However, neither MEV nor EATS is critical in terms of the STE update: one
controls the merging of events and the other controls ATS, which the
driver sequences at the same time via pci_enable_ats().

Add arm_smmu_get_ste_ignored() to allow the STE update algorithm to
ignore those fields, avoiding the STE update breakage.

Note that this change is required for both the MEV and EATS fields, which
were introduced in different kernel versions, so add the get_ignored()
hook first. MEV and EATS will be added to arm_smmu_get_ste_ignored()
separately.
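
For illustration only (not part of this patch), the follow-up changes are
expected to fill in the stub roughly like this, assuming the
STRTAB_STE_1_MEV and STRTAB_STE_1_EATS field definitions discussed
further down in the thread:

void arm_smmu_get_ste_ignored(__le64 *ignored_bits)
{
	/* MEV only merges event records; it does not affect translation */
	ignored_bits[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
	/* EATS changes are already sequenced by the driver via pci_enable_ats() */
	ignored_bits[1] |= cpu_to_le64(STRTAB_STE_1_EATS);
}
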
Fixes: 1e8be08d1c91 ("iommu/arm-smmu-v3: Support IOMMU_DOMAIN_NESTED")
Cc: stable@vger.kernel.org
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Shuai Xue <xueshuai@linux.alibaba.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 2 ++
.../iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c | 19 ++++++++++++---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 24 +++++++++++++++----
3 files changed, 37 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index ae23aacc3840..d5f0e5407b9f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -900,6 +900,7 @@ struct arm_smmu_entry_writer {
struct arm_smmu_entry_writer_ops {
void (*get_used)(const __le64 *entry, __le64 *used);
+ void (*get_ignored)(__le64 *ignored_bits);
void (*sync)(struct arm_smmu_entry_writer *writer);
};
@@ -911,6 +912,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
#if IS_ENABLED(CONFIG_KUNIT)
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
+void arm_smmu_get_ste_ignored(__le64 *ignored_bits);
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
const __le64 *target);
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
index d2671bfd3798..3556e65cf9ac 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
@@ -38,13 +38,16 @@ enum arm_smmu_test_master_feat {
static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
const __le64 *used_bits,
const __le64 *target,
+ const __le64 *ignored,
unsigned int length)
{
bool differs = false;
unsigned int i;
for (i = 0; i < length; i++) {
- if ((entry[i] & used_bits[i]) != target[i])
+ __le64 used = used_bits[i] & ~ignored[i];
+
+ if ((entry[i] & used) != (target[i] & used))
differs = true;
}
return differs;
@@ -56,12 +59,18 @@ arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
struct arm_smmu_test_writer *test_writer =
container_of(writer, struct arm_smmu_test_writer, writer);
__le64 *entry_used_bits;
+ __le64 *ignored;
entry_used_bits = kunit_kzalloc(
test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
+ ignored = kunit_kzalloc(test_writer->test,
+ sizeof(*ignored) * NUM_ENTRY_QWORDS,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test_writer->test, ignored);
+
pr_debug("STE value is now set to: ");
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
test_writer->entry,
@@ -79,14 +88,17 @@ arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
* configuration.
*/
writer->ops->get_used(test_writer->entry, entry_used_bits);
+ if (writer->ops->get_ignored)
+ writer->ops->get_ignored(ignored);
KUNIT_EXPECT_FALSE(
test_writer->test,
arm_smmu_entry_differs_in_used_bits(
test_writer->entry, entry_used_bits,
- test_writer->init_entry, NUM_ENTRY_QWORDS) &&
+ test_writer->init_entry, ignored,
+ NUM_ENTRY_QWORDS) &&
arm_smmu_entry_differs_in_used_bits(
test_writer->entry, entry_used_bits,
- test_writer->target_entry,
+ test_writer->target_entry, ignored,
NUM_ENTRY_QWORDS));
}
}
@@ -106,6 +118,7 @@ arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
.sync = arm_smmu_test_writer_record_syncs,
.get_used = arm_smmu_get_ste_used,
+ .get_ignored = arm_smmu_get_ste_ignored,
};
static const struct arm_smmu_entry_writer_ops test_cd_ops = {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index d16d35c78c06..e22c0890041b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1082,6 +1082,12 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
+VISIBLE_IF_KUNIT
+void arm_smmu_get_ste_ignored(__le64 *ignored_bits)
+{
+}
+EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_ignored);
+
/*
* Figure out if we can do a hitless update of entry to become target. Returns a
* bit mask where 1 indicates that qword needs to be set disruptively.
@@ -1094,13 +1100,22 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
{
__le64 target_used[NUM_ENTRY_QWORDS] = {};
__le64 cur_used[NUM_ENTRY_QWORDS] = {};
+ __le64 ignored[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;
writer->ops->get_used(entry, cur_used);
writer->ops->get_used(target, target_used);
+ if (writer->ops->get_ignored)
+ writer->ops->get_ignored(ignored);
for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
+ /*
+ * Ignored is only used for bits that are used by both entries,
+ * otherwise it is sequenced according to the unused entry.
+ */
+ ignored[i] &= target_used[i] & cur_used[i];
+
/*
* Check that masks are up to date, the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
@@ -1109,6 +1124,7 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
WARN_ON_ONCE(target[i] & ~target_used[i]);
/* Bits can change because they are not currently being used */
+ cur_used[i] &= ~ignored[i];
unused_update[i] = (entry[i] & cur_used[i]) |
(target[i] & ~cur_used[i]);
/*
@@ -1207,12 +1223,9 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
entry_set(writer, entry, target, 0, 1);
} else {
/*
- * No inuse bit changed. Sanity check that all unused bits are 0
- * in the entry. The target was already sanity checked by
- * compute_qword_diff().
+ * No inuse bit changed, though ignored bits may have changed.
*/
- WARN_ON_ONCE(
- entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
+ entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
@@ -1543,6 +1556,7 @@ static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
.sync = arm_smmu_ste_writer_sync_entry,
.get_used = arm_smmu_get_ste_used,
+ .get_ignored = arm_smmu_get_ste_ignored,
};
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
--
2.43.0
Hi Nicolin,
On Tue, Dec 09, 2025 at 06:45:16PM -0800, Nicolin Chen wrote:
> From: Jason Gunthorpe <jgg@nvidia.com>
>
> C_BAD_STE was observed when updating nested STE from an S1-bypass mode to
> an S1DSS-bypass mode. As both modes enabled S2, the used bit is slightly
> different than the normal S1-bypass and S1DSS-bypass modes. As a result,
> fields like MEV and EATS in S2's used list marked the word1 as a critical
> word that requested a STE.V=0. This breaks a hitless update.
>
> However, both MEV and EATS aren't critical in terms of STE update. One
> controls the merge of the events and the other controls the ATS that is
> managed by the driver at the same time via pci_enable_ats().
>
> Add an arm_smmu_get_ste_ignored() to allow STE update algorithm to ignore
> those fields, avoiding the STE update breakages.
>
> Note that this change is required by both MEV and EATS fields, which were
> introduced in different kernel versions. So add this get_ignored() first.
> The MEV and EATS will be added in arm_smmu_get_ste_ignored() separately.
>
> Fixes: 1e8be08d1c91 ("iommu/arm-smmu-v3: Support IOMMU_DOMAIN_NESTED")
> Cc: stable@vger.kernel.org
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> Reviewed-by: Shuai Xue <xueshuai@linux.alibaba.com>
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 2 ++
> .../iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c | 19 ++++++++++++---
> drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 24 +++++++++++++++----
> 3 files changed, 37 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> index ae23aacc3840..d5f0e5407b9f 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
> @@ -900,6 +900,7 @@ struct arm_smmu_entry_writer {
>
> struct arm_smmu_entry_writer_ops {
> void (*get_used)(const __le64 *entry, __le64 *used);
> + void (*get_ignored)(__le64 *ignored_bits);
> void (*sync)(struct arm_smmu_entry_writer *writer);
> };
>
> @@ -911,6 +912,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
>
> #if IS_ENABLED(CONFIG_KUNIT)
> void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
> +void arm_smmu_get_ste_ignored(__le64 *ignored_bits);
> void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
> const __le64 *target);
> void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
> index d2671bfd3798..3556e65cf9ac 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c
> @@ -38,13 +38,16 @@ enum arm_smmu_test_master_feat {
> static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
> const __le64 *used_bits,
> const __le64 *target,
> + const __le64 *ignored,
> unsigned int length)
> {
> bool differs = false;
> unsigned int i;
>
> for (i = 0; i < length; i++) {
> - if ((entry[i] & used_bits[i]) != target[i])
> + __le64 used = used_bits[i] & ~ignored[i];
> +
> + if ((entry[i] & used) != (target[i] & used))
> differs = true;
> }
> return differs;
> @@ -56,12 +59,18 @@ arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
> struct arm_smmu_test_writer *test_writer =
> container_of(writer, struct arm_smmu_test_writer, writer);
> __le64 *entry_used_bits;
> + __le64 *ignored;
>
> entry_used_bits = kunit_kzalloc(
> test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
> GFP_KERNEL);
> KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
>
> + ignored = kunit_kzalloc(test_writer->test,
> + sizeof(*ignored) * NUM_ENTRY_QWORDS,
> + GFP_KERNEL);
> + KUNIT_ASSERT_NOT_NULL(test_writer->test, ignored);
> +
> pr_debug("STE value is now set to: ");
> print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
> test_writer->entry,
> @@ -79,14 +88,17 @@ arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
> * configuration.
> */
> writer->ops->get_used(test_writer->entry, entry_used_bits);
> + if (writer->ops->get_ignored)
> + writer->ops->get_ignored(ignored);
> KUNIT_EXPECT_FALSE(
> test_writer->test,
> arm_smmu_entry_differs_in_used_bits(
> test_writer->entry, entry_used_bits,
> - test_writer->init_entry, NUM_ENTRY_QWORDS) &&
> + test_writer->init_entry, ignored,
> + NUM_ENTRY_QWORDS) &&
> arm_smmu_entry_differs_in_used_bits(
> test_writer->entry, entry_used_bits,
> - test_writer->target_entry,
> + test_writer->target_entry, ignored,
> NUM_ENTRY_QWORDS));
> }
> }
> @@ -106,6 +118,7 @@ arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
> static const struct arm_smmu_entry_writer_ops test_ste_ops = {
> .sync = arm_smmu_test_writer_record_syncs,
> .get_used = arm_smmu_get_ste_used,
> + .get_ignored = arm_smmu_get_ste_ignored,
> };
>
> static const struct arm_smmu_entry_writer_ops test_cd_ops = {
> diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> index d16d35c78c06..e22c0890041b 100644
> --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
> @@ -1082,6 +1082,12 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
> }
> EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
>
> +VISIBLE_IF_KUNIT
> +void arm_smmu_get_ste_ignored(__le64 *ignored_bits)
> +{
> +}
> +EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_ignored);
> +
> /*
> * Figure out if we can do a hitless update of entry to become target. Returns a
> * bit mask where 1 indicates that qword needs to be set disruptively.
> @@ -1094,13 +1100,22 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
> {
> __le64 target_used[NUM_ENTRY_QWORDS] = {};
> __le64 cur_used[NUM_ENTRY_QWORDS] = {};
> + __le64 ignored[NUM_ENTRY_QWORDS] = {};
I think we can avoid the extra stack allocation for another STE if we make
the function update cur_used directly, but no strong opinion.
> u8 used_qword_diff = 0;
> unsigned int i;
>
> writer->ops->get_used(entry, cur_used);
> writer->ops->get_used(target, target_used);
> + if (writer->ops->get_ignored)
> + writer->ops->get_ignored(ignored);
>
> for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
> + /*
> + * Ignored is only used for bits that are used by both entries,
> + * otherwise it is sequenced according to the unused entry.
> + */
> + ignored[i] &= target_used[i] & cur_used[i];
> +
> /*
> * Check that masks are up to date, the make functions are not
> * allowed to set a bit to 1 if the used function doesn't say it
> @@ -1109,6 +1124,7 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
> WARN_ON_ONCE(target[i] & ~target_used[i]);
>
> /* Bits can change because they are not currently being used */
> + cur_used[i] &= ~ignored[i];
> unused_update[i] = (entry[i] & cur_used[i]) |
> (target[i] & ~cur_used[i]);
> /*
> @@ -1207,12 +1223,9 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
> entry_set(writer, entry, target, 0, 1);
> } else {
> /*
> - * No inuse bit changed. Sanity check that all unused bits are 0
> - * in the entry. The target was already sanity checked by
> - * compute_qword_diff().
> + * No inuse bit changed, though ignored bits may have changed.
> */
> - WARN_ON_ONCE(
> - entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
> + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
After this change, no other caller uses the entry_set() return value, so it
can be changed to return void.
> }
> }
> EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
> @@ -1543,6 +1556,7 @@ static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
> static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
> .sync = arm_smmu_ste_writer_sync_entry,
> .get_used = arm_smmu_get_ste_used,
> + .get_ignored = arm_smmu_get_ste_ignored,
> };
>
I have some mixed feelings about this: having get_used() and then
get_ignored() with the same bits set seems confusing to me, especially
since get_ignored() loops back to update cur_used, which is set from
get_used().

My initial thought was that removing this bit from get_used(), plus some
changes to the checks for setting bits that are not used, would be enough,
and the semantics of get_used() could be something like:
“Return bits used by the updated translation regime that MUST be observed
atomically”. In that case we can ignore things like MEV, as it doesn’t
impact the translation.

However, this approach does make it more explicit which bits are ignored.
If we keep this logic, I think changing the name of get_ignored() might
help, to something like "get_allowed_break()" or "get_update_safe()"?

Thanks,
Mostafa
> static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
> --
> 2.43.0
>
On Sun, Dec 14, 2025 at 10:32:35PM +0000, Mostafa Saleh wrote:
> > * Figure out if we can do a hitless update of entry to become target. Returns a
> > * bit mask where 1 indicates that qword needs to be set disruptively.
> > @@ -1094,13 +1100,22 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
> > {
> > __le64 target_used[NUM_ENTRY_QWORDS] = {};
> > __le64 cur_used[NUM_ENTRY_QWORDS] = {};
> > + __le64 ignored[NUM_ENTRY_QWORDS] = {};
>
> I think we can avoid extra stack allocation for another STE, if we make
> the function update cur_used directly, but no strong opinion.
It does more than just mask cur_used, it also adjusts ignored:
> > + /*
> > + * Ignored is only used for bits that are used by both entries,
> > + * otherwise it is sequenced according to the unused entry.
> > + */
> > + ignored[i] &= target_used[i] & cur_used[i];
Which also explains this:
> I have some mixed feelings about this, having get_used(), then get_ignored()
> with the same bits set seems confusing to me, specially the get_ignored()
> loops back to update cur_used, which is set from get_used()
The same bits are set because of the above - we need to know what the
actual used bits are to decide if we need to rely on the ignored rule
to do the update.
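
As a toy single-qword illustration (hypothetical bit values, not real STE
fields):

u64 cur_used    = 0x6; /* bits 1,2: used by the installed entry */
u64 target_used = 0x3; /* bits 0,1: used by the target entry */
u64 ignored     = 0x2; /* bit 1: reported by get_ignored() */

ignored &= target_used & cur_used; /* 0x2: only bits used by both stay ignored */
cur_used &= ~ignored;              /* 0x4: bit 1 may now change via unused_update */

Bit 1 can follow the target value without forcing V=0, while bit 2, which
is used only by the current entry, is still sequenced as before.
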
> My initial though was just to remove this bit from get_used() + some changes
> to checks setting bits that are not used would be enough, and the semantics
> of get_used() can be something as:
> “Return bits used by the updated translation regime that MUST be observed
> atomically” and in that case we can ignore things as MEV as it doesn’t
> impact the translation.
Aside from the above this would cause problems with the validation
assertions, so it is not a great idea.
> However, this approach makes it a bit explicit which bits are ignored, if we
> keep this logic, I think changing the name of get_ignored() might help, to
> something as "get_allowed_break()" or "get_update_safe()"?
update_safe sounds good to me
Jason
On Mon, Dec 15, 2025 at 08:09:52PM -0400, Jason Gunthorpe wrote:
> On Sun, Dec 14, 2025 at 10:32:35PM +0000, Mostafa Saleh wrote:
> > > * Figure out if we can do a hitless update of entry to become target. Returns a
> > > * bit mask where 1 indicates that qword needs to be set disruptively.
> > > @@ -1094,13 +1100,22 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
> > > {
> > > __le64 target_used[NUM_ENTRY_QWORDS] = {};
> > > __le64 cur_used[NUM_ENTRY_QWORDS] = {};
> > > + __le64 ignored[NUM_ENTRY_QWORDS] = {};
> >
> > I think we can avoid extra stack allocation for another STE, if we make
> > the function update cur_used directly, but no strong opinion.
>
> It does more than just mask cur_used, it also adjusts ignored:
>
> > > + /*
> > > + * Ignored is only used for bits that are used by both entries,
> > > + * otherwise it is sequenced according to the unused entry.
> > > + */
> > > + ignored[i] &= target_used[i] & cur_used[i];
>
> Which also explains this:
I haven't tested this, but I was thinking of something like the following
(applies on top of the patches):
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 72ba41591fdb..9981eefcf0da 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1083,7 +1083,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
VISIBLE_IF_KUNIT
-void arm_smmu_get_ste_ignored(__le64 *ignored_bits)
+void arm_smmu_get_ste_ignored(__le64 *used)
{
/*
* MEV does not meaningfully impact the operation of the HW, it only
@@ -1093,17 +1093,14 @@ void arm_smmu_get_ste_ignored(__le64 *ignored_bits)
*
* Note: Software must expect, and be able to deal with, coalesced
* fault records even when MEV == 0.
- */
- ignored_bits[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
-
- /*
+ *
* EATS is used to reject and control the ATS behavior of the device. If
* we are changing it away from 0 then we already trust the device to
* use ATS properly and we have sequenced the device's ATS enable in PCI
* config space to prevent it from issuing ATS while we are changing
* EATS.
*/
- ignored_bits[1] |= cpu_to_le64(STRTAB_STE_1_EATS);
+ used[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_MEV);
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_ignored);
@@ -1119,22 +1116,15 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
{
__le64 target_used[NUM_ENTRY_QWORDS] = {};
__le64 cur_used[NUM_ENTRY_QWORDS] = {};
- __le64 ignored[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;
writer->ops->get_used(entry, cur_used);
writer->ops->get_used(target, target_used);
- if (writer->ops->get_ignored)
- writer->ops->get_ignored(ignored);
+ if (writer->ops->filter_ignored)
+ writer->ops->filter_ignored(cur_used);
for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
- /*
- * Ignored is only used for bits that are used by both entries,
- * otherwise it is sequenced according to the unused entry.
- */
- ignored[i] &= target_used[i] & cur_used[i];
-
/*
* Check that masks are up to date, the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
@@ -1142,8 +1132,6 @@ static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
*/
WARN_ON_ONCE(target[i] & ~target_used[i]);
- /* Bits can change because they are not currently being used */
- cur_used[i] &= ~ignored[i];
unused_update[i] = (entry[i] & cur_used[i]) |
(target[i] & ~cur_used[i]);
/*
@@ -1575,7 +1563,7 @@ static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
.sync = arm_smmu_ste_writer_sync_entry,
.get_used = arm_smmu_get_ste_used,
- .get_ignored = arm_smmu_get_ste_ignored,
+ .filter_ignored = arm_smmu_get_ste_ignored,
};
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index d5f0e5407b9f..97b995974049 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -900,7 +900,7 @@ struct arm_smmu_entry_writer {
struct arm_smmu_entry_writer_ops {
void (*get_used)(const __le64 *entry, __le64 *used);
- void (*get_ignored)(__le64 *ignored_bits);
+ void (*filter_ignored)(__le64 *used);
void (*sync)(struct arm_smmu_entry_writer *writer);
};
And we only clear the bits from cur_used; there is no need for the other
mask on ignored (ignored[i] &= target_used[i] & cur_used[i]):
- If an ignored bit is not in cur_used, it will not impact
  "cur_used[i] &= ~ignored[i]", as it must already be zero.
- If an ignored bit is not in target_used, it doesn't really matter; we
  can ignore it anyway, as it is safe to do so.

Anyway, I am not fixated on that; the extra 64B on the stack is not that
bad.
>
> > I have some mixed feelings about this, having get_used(), then get_ignored()
> > with the same bits set seems confusing to me, specially the get_ignored()
> > loops back to update cur_used, which is set from get_used()
>
> The same bits are set because of the above - we need to know what the
> actual used bits are to decide if we need to rely on the ignored rule
> to do the update.
>
> > My initial though was just to remove this bit from get_used() + some changes
> > to checks setting bits that are not used would be enough, and the semantics
> > of get_used() can be something as:
> > “Return bits used by the updated translation regime that MUST be observed
> > atomically” and in that case we can ignore things as MEV as it doesn’t
> > impact the translation.
>
> Aside from the above this would cause problems with the validation
> assertions, so it is not a great idea.
Yes, that's why I didn't like this approach; it had to hack around the
validation logic.
Thanks,
Mostafa
>
> > However, this approach makes it a bit explicit which bits are ignored, if we
> > keep this logic, I think changing the name of get_ignored() might help, to
> > something as "get_allowed_break()" or "get_update_safe()"?
>
> update_safe sounds good to me
>
> Jason
On Tue, Dec 16, 2025 at 10:58:33PM +0000, Mostafa Saleh wrote:
> for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
> - /*
> - * Ignored is only used for bits that are used by both entries,
> - * otherwise it is sequenced according to the unused entry.
> - */
> - ignored[i] &= target_used[i] & cur_used[i];
> -
It is not functionally the same thing without this..
> And we only clear the bits from cur_used, there is no need to for the
> other mask ignored (ignored[i] &= target_used[i] & cur_used[i])
> - If an ignored bit is not in cur_used it will not impact
> "cur_used[i] &= ~ignored[i]" as it must be already zero
> - If an ignored bit is not in target_used, it doesn't really matter,
> we can ignore it anyway, as it is safe to do so.
That was an earlier version; it was switched to this so as to be less of
a change, though the reasoning is sound.
Jason
On Mon, Dec 15, 2025 at 08:09:52PM -0400, Jason Gunthorpe wrote:
> On Sun, Dec 14, 2025 at 10:32:35PM +0000, Mostafa Saleh wrote:
> > However, this approach makes it a bit explicit which bits are ignored, if we
> > keep this logic, I think changing the name of get_ignored() might help, to
> > something as "get_allowed_break()" or "get_update_safe()"?
>
> update_safe sounds good to me

I have renamed the op and changed entry_set to void. I'll send v4 later
today.

Thanks
Nicolin
Hi Mostafa,
On Sun, Dec 14, 2025 at 10:32:35PM +0000, Mostafa Saleh wrote:
> On Tue, Dec 09, 2025 at 06:45:16PM -0800, Nicolin Chen wrote:
> > @@ -1207,12 +1223,9 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
> > entry_set(writer, entry, target, 0, 1);
> > } else {
> > /*
> > - * No inuse bit changed. Sanity check that all unused bits are 0
> > - * in the entry. The target was already sanity checked by
> > - * compute_qword_diff().
> > + * No inuse bit changed, though ignored bits may have changed.
> > */
> > - WARN_ON_ONCE(
> > - entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
> > + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
>
> After this change, no other caller uses the entry_set() return value, so it
> can be changed to return void.
OK.
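
Something along these lines then (a sketch only, assuming entry_set()
otherwise keeps its current shape):

static void entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
		      const __le64 *target, unsigned int start,
		      unsigned int len)
{
	bool changed = false;
	unsigned int i;

	for (i = start; len != 0; len--, i++) {
		if (entry[i] != target[i]) {
			WRITE_ONCE(entry[i], target[i]);
			changed = true;
		}
	}

	/* Still sync when anything changed, just no longer report it */
	if (changed)
		writer->ops->sync(writer);
}
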
> > }
> > }
> > EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
> > @@ -1543,6 +1556,7 @@ static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
> > static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
> > .sync = arm_smmu_ste_writer_sync_entry,
> > .get_used = arm_smmu_get_ste_used,
> > + .get_ignored = arm_smmu_get_ste_ignored,
> > };
> >
>
> I have some mixed feelings about this, having get_used(), then get_ignored()
> with the same bits set seems confusing to me, specially the get_ignored()
> loops back to update cur_used, which is set from get_used()
>
> My initial though was just to remove this bit from get_used() + some changes
> to checks setting bits that are not used would be enough, and the semantics
> of get_used() can be something as:
> “Return bits used by the updated translation regime that MUST be observed
> atomically” and in that case we can ignore things as MEV as it doesn’t
> impact the translation.
>
> However, this approach makes it a bit explicit which bits are ignored, if we
> keep this logic, I think changing the name of get_ignored() might help, to
> something as "get_allowed_break()" or "get_update_safe()"?
I think "ignored" itself is brief and understandable. Instead, perhaps we
can add a kdoc comment to make it clearer:
/**
* struct arm_smmu_entry_writer_ops - STE/CD entry writer operations
* @get_used: Output to @used the bits used by the hardware corresponding to the
* configuration bits set in a given @entry
* @get_ignored: Output to @ignored the bits that are listed in the "used" list
* but allowed to be ignored by arm_smmu_entry_qword_diff(). Each
* field (bits) must provide a reason to justify that the entries
* can be updated safely without breaking STE/CD configurations.
* @sync: Operation to synchronize the updated STE/CD entries in memory
*/
struct arm_smmu_entry_writer_ops {
void (*get_used)(const __le64 *entry, __le64 *used);
void (*get_ignored)(__le64 *ignored);
void (*sync)(struct arm_smmu_entry_writer *writer);
};
?
Thanks
Nicolin
On Mon, Dec 15, 2025 at 8:51 PM Nicolin Chen <nicolinc@nvidia.com> wrote:
>
> Hi Mostafa,
>
> On Sun, Dec 14, 2025 at 10:32:35PM +0000, Mostafa Saleh wrote:
> > On Tue, Dec 09, 2025 at 06:45:16PM -0800, Nicolin Chen wrote:
> > > @@ -1207,12 +1223,9 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
> > > entry_set(writer, entry, target, 0, 1);
> > > } else {
> > > /*
> > > - * No inuse bit changed. Sanity check that all unused bits are 0
> > > - * in the entry. The target was already sanity checked by
> > > - * compute_qword_diff().
> > > + * No inuse bit changed, though ignored bits may have changed.
> > > */
> > > - WARN_ON_ONCE(
> > > - entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
> > > + entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
> >
> > After this change, no other caller uses the entry_set() return value, so it
> > can be changed to return void.
>
> OK.
>
> > > }
> > > }
> > > EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
> > > @@ -1543,6 +1556,7 @@ static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
> > > static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
> > > .sync = arm_smmu_ste_writer_sync_entry,
> > > .get_used = arm_smmu_get_ste_used,
> > > + .get_ignored = arm_smmu_get_ste_ignored,
> > > };
> > >
> >
> > I have some mixed feelings about this, having get_used(), then get_ignored()
> > with the same bits set seems confusing to me, specially the get_ignored()
> > loops back to update cur_used, which is set from get_used()
> >
> > My initial though was just to remove this bit from get_used() + some changes
> > to checks setting bits that are not used would be enough, and the semantics
> > of get_used() can be something as:
> > “Return bits used by the updated translation regime that MUST be observed
> > atomically” and in that case we can ignore things as MEV as it doesn’t
> > impact the translation.
> >
> > However, this approach makes it a bit explicit which bits are ignored, if we
> > keep this logic, I think changing the name of get_ignored() might help, to
> > something as "get_allowed_break()" or "get_update_safe()"?
>
> I think "ignored" itself is brief and understandable.. Instead,
> perhaps we can add a kdocs to make it clearer:
>
> /**
> * struct arm_smmu_entry_writer_ops - STE/CD entry writer operations
> * @get_used: Output to @used the bits used by the hardware corresponding to the
> * configurations bits set in a given @entry
> * @get_ignored: Output to @ignored the bits that are listed in the "used" list
> * but allowed to be ignored by arm_smmu_entry_qword_diff(). Each
> * field (bits) must provide a reason to justify that the entries
> * can be updated safely without breaking STE/CD configurations.
> * @sync: Operation to synchronize the updated STE/CD entries in the memory
> */
> struct arm_smmu_entry_writer_ops {
> void (*get_used)(const __le64 *entry, __le64 *used);
> void (*get_ignored)(__le64 *ignored);
> void (*sync)(struct arm_smmu_entry_writer *writer);
> };
>
> ?
>
A comment is indeed helpful, but my point was that "used" and
"ignored" make it seem that they are mutually exclusive.
Thanks,
Mostafa
> Thanks
> Nicolin