If paddr is an MMIO address, clear the SME flag. It makes no sense to
set the SME bit on an MMIO address.
---
 drivers/iommu/amd/io_pgtable.c    | 6 ++++--
 drivers/iommu/amd/io_pgtable_v2.c | 6 +++++-
 2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 4d308c071134..88b204449c2c 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -352,15 +352,17 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			updated = true;
 
 		if (count > 1) {
-			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
+			__pte = PAGE_SIZE_PTE(paddr, pgsize);
 			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
 		} else
-			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
+			__pte = paddr | IOMMU_PTE_PR | IOMMU_PTE_FC;
 
 		if (prot & IOMMU_PROT_IR)
 			__pte |= IOMMU_PTE_IR;
 		if (prot & IOMMU_PROT_IW)
 			__pte |= IOMMU_PTE_IW;
+		if (pfn_valid(__phys_to_pfn(paddr)))
+			__pte = __sme_set(__pte);
 
 		for (i = 0; i < count; ++i)
 			pte[i] = __pte;
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index b47941353ccb..b301fb8e58fa 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -65,7 +65,11 @@ static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
 {
 	u64 pte;
 
-	pte = __sme_set(paddr & PM_ADDR_MASK);
+	if (pfn_valid(__phys_to_pfn(paddr)))
+		pte = __sme_set(paddr & PM_ADDR_MASK);
+	else
+		pte = paddr & PM_ADDR_MASK;
+
 	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
 	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
 
--
2.43.0
On Wed, Jun 25, 2025 at 02:48:02PM +0800, YangWencheng wrote:
> If paddr is an MMIO address, clear the SME flag. It makes no sense to
> set the SME bit on an MMIO address.

No Signed-off-by.
On 2025-06-25 7:48 am, YangWencheng wrote:
> If paddr is an MMIO address, clear the SME flag. It makes no sense to
> set the SME bit on an MMIO address.

Arguably it also doesn't make sense for callers to be mapping MMIO
addresses without IOMMU_MMIO...

> ---
>  drivers/iommu/amd/io_pgtable.c    | 6 ++++--
>  drivers/iommu/amd/io_pgtable_v2.c | 6 +++++-
>  2 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
> index 4d308c071134..88b204449c2c 100644
> --- a/drivers/iommu/amd/io_pgtable.c
> +++ b/drivers/iommu/amd/io_pgtable.c
> @@ -352,15 +352,17 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
>  			updated = true;
> 
>  		if (count > 1) {
> -			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
> +			__pte = PAGE_SIZE_PTE(paddr, pgsize);
>  			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
>  		} else
> -			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
> +			__pte = paddr | IOMMU_PTE_PR | IOMMU_PTE_FC;
> 
>  		if (prot & IOMMU_PROT_IR)
>  			__pte |= IOMMU_PTE_IR;
>  		if (prot & IOMMU_PROT_IW)
>  			__pte |= IOMMU_PTE_IW;
> +		if (pfn_valid(__phys_to_pfn(paddr)))

As usual, pfn_valid() isn't really appropriate for this anyway, since
all it means is "does a struct page exist?", and in general it is
entirely possible for (reserved) pages to exist for non-RAM addresses.

Thanks,
Robin.

> +			__pte = __sme_set(__pte);
> 
>  		for (i = 0; i < count; ++i)
>  			pte[i] = __pte;
> diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
> index b47941353ccb..b301fb8e58fa 100644
> --- a/drivers/iommu/amd/io_pgtable_v2.c
> +++ b/drivers/iommu/amd/io_pgtable_v2.c
> @@ -65,7 +65,11 @@ static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
>  {
>  	u64 pte;
> 
> -	pte = __sme_set(paddr & PM_ADDR_MASK);
> +	if (pfn_valid(__phys_to_pfn(paddr)))
> +		pte = __sme_set(paddr & PM_ADDR_MASK);
> +	else
> +		pte = paddr & PM_ADDR_MASK;
> +
>  	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
>  	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
> 
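A minimal sketch of the direction Robin hints at: instead of guessing from
pfn_valid(), key the decision off the caller-supplied IOMMU_MMIO prot flag.
This assumes the core flag were plumbed down to the AMD page-table code
(the v1 path currently only sees the driver-internal IOMMU_PROT_* bits), so
the helper name and wiring below are hypothetical and only illustrate the
idea:

#include <linux/iommu.h>	/* IOMMU_MMIO */
#include <linux/mem_encrypt.h>	/* __sme_set() */

/*
 * Illustrative only: leave the SME C-bit clear for mappings the caller
 * has explicitly marked as MMIO, and set it for everything else.
 */
static u64 amd_iommu_pte_paddr(u64 paddr, int iommu_prot)
{
	if (iommu_prot & IOMMU_MMIO)
		return paddr;

	return __sme_set(paddr);
}

A caller would pass the untranslated prot it received from the IOMMU core,
e.g. amd_iommu_pte_paddr(paddr, iommu_prot), in place of the pfn_valid()
checks in the patch above.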