When such ranges can't be represented as 1:1 mappings in page tables,
reject them as presumably bogus. Note that when we detect features late
(because of EFRSup being clear in the ACPI tables), it would be quite a
bit of work to check for (and drop) out-of-range IVMD ranges, so IOMMU
initialization is simply failed in this case instead.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
---
v7: Re-base.
v6: Re-base.
v5: New.
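
(Illustration only; being below the --- marker, this note is not part of
the commit proper. The check added to parse_ivmd_block() boils down to
the sketch here: the helper name ivmd_is_addressable() is invented for
this note, and addr_bits stands for the smaller of the IVinfo PASize and
VASize fields, just as in the patch.)

/* Sketch: can the IOMMU map [start_addr, start_addr + mem_length) 1:1? */
static bool __init ivmd_is_addressable(unsigned long start_addr,
                                       unsigned long mem_length,
                                       unsigned int addr_bits)
{
    unsigned long end = start_addr + mem_length;

    /* No supported page-table depth reaches the range's final frame. */
    if ( amd_iommu_get_paging_mode(PFN_UP(end)) < 0 )
        return false;

    /* The range's last byte exceeds the IOMMU's address width. */
    if ( addr_bits < BITS_PER_LONG && ((end - 1) >> addr_bits) )
        return false;

    return true;
}
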
--- a/xen/drivers/passthrough/amd/iommu.h
+++ b/xen/drivers/passthrough/amd/iommu.h
@@ -307,6 +307,7 @@ extern struct hpet_sbdf {
} hpet_sbdf;

extern unsigned int amd_iommu_acpi_info;
+extern unsigned int amd_iommu_max_paging_mode;
extern int amd_iommu_min_paging_mode;

extern void *shared_intremap_table;
@@ -360,7 +361,7 @@ static inline int amd_iommu_get_paging_m
while ( max_frames > PTE_PER_TABLE_SIZE )
{
max_frames = PTE_PER_TABLE_ALIGN(max_frames) >> PTE_PER_TABLE_SHIFT;
- if ( ++level > 6 )
+ if ( ++level > amd_iommu_max_paging_mode )
return -ENOMEM;
}

--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -378,6 +378,7 @@ static int __init parse_ivmd_device_iomm
static int __init parse_ivmd_block(const struct acpi_ivrs_memory *ivmd_block)
{
unsigned long start_addr, mem_length, base, limit;
+ unsigned int addr_bits;
bool iw = true, ir = true, exclusion = false;

if ( ivmd_block->header.length < sizeof(*ivmd_block) )
@@ -394,6 +395,17 @@ static int __init parse_ivmd_block(const
AMD_IOMMU_DEBUG("IVMD Block: type %#x phys %#lx len %#lx\n",
ivmd_block->header.type, start_addr, mem_length);

+ addr_bits = min(MASK_EXTR(amd_iommu_acpi_info, ACPI_IVRS_PHYSICAL_SIZE),
+ MASK_EXTR(amd_iommu_acpi_info, ACPI_IVRS_VIRTUAL_SIZE));
+ if ( amd_iommu_get_paging_mode(PFN_UP(start_addr + mem_length)) < 0 ||
+ (addr_bits < BITS_PER_LONG &&
+ ((start_addr + mem_length - 1) >> addr_bits)) )
+ {
+ AMD_IOMMU_DEBUG("IVMD: [%lx,%lx) is not IOMMU addressable\n",
+ start_addr, start_addr + mem_length);
+ return 0;
+ }
+
if ( !e820_all_mapped(base, limit + PAGE_SIZE, E820_RESERVED) )
{
paddr_t addr;
--- a/xen/drivers/passthrough/amd/iommu_detect.c
+++ b/xen/drivers/passthrough/amd/iommu_detect.c
@@ -67,6 +67,9 @@ void __init get_iommu_features(struct am

iommu->features.raw =
readq(iommu->mmio_base + IOMMU_EXT_FEATURE_MMIO_OFFSET);
+
+ if ( 4 + iommu->features.flds.hats < amd_iommu_max_paging_mode )
+ amd_iommu_max_paging_mode = 4 + iommu->features.flds.hats;
}

/* Don't log the same set of features over and over. */
@@ -200,6 +203,10 @@ int __init amd_iommu_detect_one_acpi(
else if ( list_empty(&amd_iommu_head) )
AMD_IOMMU_DEBUG("EFRSup not set in ACPI table; will fall back to hardware\n");

+ if ( (amd_iommu_acpi_info & ACPI_IVRS_EFR_SUP) &&
+ 4 + iommu->features.flds.hats < amd_iommu_max_paging_mode )
+ amd_iommu_max_paging_mode = 4 + iommu->features.flds.hats;
+
/* override IOMMU HT flags */
iommu->ht_flags = ivhd_block->header.flags;

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -1376,6 +1376,13 @@ static int __init amd_iommu_prepare_one(

get_iommu_features(iommu);

+ /*
+ * Late extended feature determination may cause previously mappable
+ * IVMD ranges to become unmappable.
+ */
+ if ( amd_iommu_max_paging_mode < amd_iommu_min_paging_mode )
+ return -ERANGE;
+
return 0;
}

--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -254,6 +254,7 @@ int amd_iommu_alloc_root(struct domain *
return 0;
}

+unsigned int __read_mostly amd_iommu_max_paging_mode = 6;
int __read_mostly amd_iommu_min_paging_mode = 1;

static int amd_iommu_domain_init(struct domain *d)
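
(Illustration only, not part of the patch: the "4 + ...hats" expressions
above reflect, to the best of my reading, the AMD IOMMU spec's HATS
(host address translation size) extended feature field, which advertises
the supported host page-table depth relative to the 4-level baseline.)

/*
 * Sketch: translate HATS into the deepest usable paging mode:
 * 0 -> 4 levels (48-bit), 1 -> 5 levels (57-bit), 2 -> 6 levels (64-bit).
 * amd_iommu_max_paging_mode starts out at 6 above and is only ever
 * lowered, whether from hardware EFR or from the ACPI-provided image.
 */
static unsigned int hats_to_max_paging_mode(unsigned int hats)
{
    return 4 + hats;
}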