From: Eric Auger <eric.auger@redhat.com>
To handle SMMUv3 nested stage support it is practical to
expose the guest to reserved memory regions (RMRs)
covering the IOVAs used by the host kernel to map
physical MSI doorbells.

Those IOVAs belong to [0x8000000, 0x8100000], matching the
MSI_IOVA_BASE and MSI_IOVA_LENGTH definitions in the kernel
arm-smmu-v3 driver. This is the window used to allocate
IOVAs matching physical MSI doorbells.
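For reference, a minimal sketch of that window as defined in the Linux
arm-smmu-v3 driver (values reproduced here for illustration; the exact
file location and spelling should be checked against the kernel tree in
use):

    /* Linux arm-smmu-v3 driver: software MSI reservation window */
    #define MSI_IOVA_BASE     0x8000000
    #define MSI_IOVA_LENGTH   0x100000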
With those RMRs, the guest is forced to use a flat mapping
for this range. Hence the assigned device is programmed
with one IOVA from this range. Stage 1, owned by the guest,
has a flat mapping for this IOVA. Stage 2, owned by the VMM,
then enforces a mapping from this IOVA to the physical
MSI doorbell.
The creation of those RMR nodes is only relevant if a nested
stage SMMU is in use, along with VFIO. As VFIO devices can be
hotplugged, all RMRs need to be created in advance. Hence
this patch introduces a new arm virt "nested-smmuv3" iommu type.
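For context, a sketch of how that iommu type would presumably be
selected, following the existing iommu=smmuv3 machine option; the exact
option spelling is whatever this series defines and the BDF below is a
placeholder:

    qemu-system-aarch64 -M virt,iommu=nested-smmuv3 ... \
        -device vfio-pci,host=0000:01:00.0 ...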
The ARM DEN 0049E.b IORT specification also mandates that, when
RMRs are present, the OS must preserve the PCIe configuration
performed by the boot FW. So, along with the RMR IORT nodes,
a _DSM function #5, as defined by the PCI Firmware Specification
Revision 3.3, chapter 4.6.5, is added to the PCIe host bridge
and PCIe expander bridge objects.
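For illustration only, a rough sketch of what such a _DSM can look like
when built with QEMU's aml_* helpers. This is not the code added by this
patch (the hunk below merely sets cfg.preserve_config and relies on the
common GPEX DSDT builder); the helper name is hypothetical, and the
"function #5 returns 0 means preserve" semantics follow my reading of
the PCI Firmware Specification:

    #include "qemu/osdep.h"
    #include "hw/acpi/aml-build.h"

    /* Hypothetical helper: build a host-bridge _DSM whose function #5
     * returns 0, i.e. "the OS must preserve the boot firmware's PCI
     * resource assignments". */
    static Aml *build_preserve_boot_config_dsm(void)
    {
        Aml *method = aml_method("_DSM", 4, AML_SERIALIZED);
        Aml *ifuuid, *iffn;
        uint8_t func_mask[] = { 0x21 }; /* functions 0 and 5 supported */

        /* PCI Firmware Spec host bridge _DSM UUID */
        ifuuid = aml_if(aml_equal(aml_arg(0),
                        aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D")));

        iffn = aml_if(aml_equal(aml_arg(2), aml_int(0)));   /* query */
        aml_append(iffn, aml_return(aml_buffer(sizeof(func_mask), func_mask)));
        aml_append(ifuuid, iffn);

        iffn = aml_if(aml_equal(aml_arg(2), aml_int(5)));   /* function #5 */
        aml_append(iffn, aml_return(aml_int(0)));           /* 0: preserve */
        aml_append(ifuuid, iffn);

        aml_append(method, ifuuid);
        aml_append(method, aml_return(aml_int(0)));         /* default */
        return method;
    }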
Signed-off-by: Eric Auger <eric.auger@redhat.com>
Suggested-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
hw/arm/virt-acpi-build.c | 77 +++++++++++++++++++++++++++++++++++-----
1 file changed, 68 insertions(+), 9 deletions(-)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index ec4cdfb2d7..f327ca59ec 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -132,6 +132,14 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
.bus = vms->bus,
};
+ /*
+ * Nested SMMU requires RMRs for MSI 1-1 mapping, which
+ * require _DSM for Preserving PCI Boot Configurations
+ */
+ if (vms->iommu == VIRT_IOMMU_SMMUV3_NESTED) {
+ cfg.preserve_config = true;
+ }
+
if (vms->highmem_mmio) {
cfg.mmio64 = memmap[VIRT_HIGH_PCIE_MMIO];
}
@@ -216,16 +224,16 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms)
*
* Note that @id_count gets internally subtracted by one, following the spec.
*/
-static void build_iort_id_mapping(GArray *table_data, uint32_t input_base,
- uint32_t id_count, uint32_t out_ref)
+static void
+build_iort_id_mapping(GArray *table_data, uint32_t input_base,
+ uint32_t id_count, uint32_t out_ref, uint32_t flags)
{
build_append_int_noprefix(table_data, input_base, 4); /* Input base */
/* Number of IDs - The number of IDs in the range minus one */
build_append_int_noprefix(table_data, id_count - 1, 4);
build_append_int_noprefix(table_data, input_base, 4); /* Output base */
build_append_int_noprefix(table_data, out_ref, 4); /* Output Reference */
- /* Flags */
- build_append_int_noprefix(table_data, 0 /* Single mapping (disabled) */, 4);
+ build_append_int_noprefix(table_data, flags, 4); /* Flags */
}
struct AcpiIortIdMapping {
@@ -267,6 +275,50 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b)
return idmap_a->input_base - idmap_b->input_base;
}
+static void
+build_iort_rmr_nodes(GArray *table_data, GArray *smmu_idmaps,
+ size_t *smmu_offset, uint32_t *id)
+{
+ AcpiIortIdMapping *range;
+ int i;
+
+ for (i = 0; i < smmu_idmaps->len; i++) {
+ range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);
+ int bdf = range->input_base;
+
+ /* Table 18 Reserved Memory Range Node */
+
+ build_append_int_noprefix(table_data, 6 /* RMR */, 1); /* Type */
+ /* Length */
+ build_append_int_noprefix(table_data, 28 + ID_MAPPING_ENTRY_SIZE + 20, 2);
+ build_append_int_noprefix(table_data, 3, 1); /* Revision */
+ build_append_int_noprefix(table_data, *id, 4); /* Identifier */
+ /* Number of ID mappings */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Reference to ID Array */
+ build_append_int_noprefix(table_data, 28, 4);
+
+ /* RMR specific data */
+
+ /* Flags */
+ build_append_int_noprefix(table_data, 0 /* Disallow remapping */, 4);
+ /* Number of Memory Range Descriptors */
+ build_append_int_noprefix(table_data, 1, 4);
+ /* Reference to Memory Range Descriptors */
+ build_append_int_noprefix(table_data, 28 + ID_MAPPING_ENTRY_SIZE, 4);
+ build_iort_id_mapping(table_data, bdf, range->id_count, smmu_offset[i], 1);
+
+ /* Table 19 Memory Range Descriptor */
+
+ /* Physical Range offset */
+ build_append_int_noprefix(table_data, 0x8000000, 8);
+ /* Physical Range length */
+ build_append_int_noprefix(table_data, 0x100000, 8);
+ build_append_int_noprefix(table_data, 0, 4); /* Reserved */
+ *id += 1;
+ }
+}
+
/*
* Input Output Remapping Table (IORT)
* Conforms to "IO Remapping Table System Software on ARM Platforms",
@@ -284,7 +336,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
- AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id,
+ AcpiTable table = { .sig = "IORT", .rev = 5, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
/* Table 2 The IORT */
acpi_table_begin(&table, table_data);
@@ -325,6 +377,9 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
next_range.input_base = idmap->input_base + idmap->id_count;
+ if (vms->iommu == VIRT_IOMMU_SMMUV3_NESTED) {
+ nb_nodes++; /* RMR node per SMMU */
+ }
}
/* Append the last RC -> ITS ID mapping */
@@ -386,7 +441,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
build_append_int_noprefix(table_data, 0, 4);
/* output IORT node is the ITS group node (the first node) */
- build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET);
+ build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET, 0);
}
/* Table 17 Root Complex Node */
@@ -427,7 +482,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);
/* output IORT node is the smmuv3 node */
build_iort_id_mapping(table_data, range->input_base,
- range->id_count, smmu_offset[i]);
+ range->id_count, smmu_offset[i], 0);
}
/* bypassed RIDs connect to ITS group node directly: RC -> ITS */
@@ -435,11 +490,15 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
range = &g_array_index(its_idmaps, AcpiIortIdMapping, i);
/* output IORT node is the ITS group node (the first node) */
build_iort_id_mapping(table_data, range->input_base,
- range->id_count, IORT_NODE_OFFSET);
+ range->id_count, IORT_NODE_OFFSET, 0);
}
} else {
/* output IORT node is the ITS group node (the first node) */
- build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET);
+ build_iort_id_mapping(table_data, 0, 0x10000, IORT_NODE_OFFSET, 0);
+ }
+
+ if (vms->iommu == VIRT_IOMMU_SMMUV3_NESTED) {
+ build_iort_rmr_nodes(table_data, smmu_idmaps, smmu_offset, &id);
}
acpi_table_end(linker, &table);
--
2.34.1
On Fri, Nov 08, 2024 at 12:52:42PM +0000, Shameer Kolothum wrote:
> From: Eric Auger <eric.auger@redhat.com>
>
> To handle SMMUv3 nested stage support it is practical to
> expose the guest with reserved memory regions (RMRs)
> covering the IOVAs used by the host kernel to map
> physical MSI doorbells.

There has been an ongoing solution for MSI alternative:
https://lore.kernel.org/kvm/cover.1731130093.git.nicolinc@nvidia.com/

So, I think we should keep this patch out of this series, instead
put it on top of the testing branch.

Thanks
Nicolin
> -----Original Message-----
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: Wednesday, November 13, 2024 6:31 PM
> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
> eric.auger@redhat.com; peter.maydell@linaro.org; jgg@nvidia.com;
> ddutile@redhat.com; Linuxarm <linuxarm@huawei.com>; Wangzhou (B)
> <wangzhou1@hisilicon.com>; jiangkunkun <jiangkunkun@huawei.com>;
> Jonathan Cameron <jonathan.cameron@huawei.com>;
> zhangfei.gao@linaro.org
> Subject: Re: [RFC PATCH 5/5] hw/arm/virt-acpi-build: Add IORT RMR regions
> to handle MSI nested binding
>
> On Fri, Nov 08, 2024 at 12:52:42PM +0000, Shameer Kolothum wrote:
> > From: Eric Auger <eric.auger@redhat.com>
> >
> > To handle SMMUv3 nested stage support it is practical to expose the
> > guest with reserved memory regions (RMRs) covering the IOVAs used by
> > the host kernel to map physical MSI doorbells.
>
> There has been an ongoing solution for MSI alternative:
> https://lore.kernel.org/kvm/cover.1731130093.git.nicolinc@nvidia.com/
>
> So, I think we should keep this patch out of this series, instead put it on top
> of the testing branch.

Yes. I think then we can support DT solution as well.

On that MSI RFC above, have you seen Eric's earlier/initial proposal to bind the Guest MSI in
nested cases. IIRC, it was providing an IOCTL and then creating a mapping in the host.

I think this is the latest on that.
https://lore.kernel.org/linux-iommu/20210411114659.15051-4-eric.auger@redhat.com/

But not sure, why we then moved to RMR approach. Eric?

Thanks,
Shameer
Hi Shameer,

On 11/14/24 09:48, Shameerali Kolothum Thodi wrote:
>
>> -----Original Message-----
>> From: Nicolin Chen <nicolinc@nvidia.com>
>> Sent: Wednesday, November 13, 2024 6:31 PM
>> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
>> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
>> eric.auger@redhat.com; peter.maydell@linaro.org; jgg@nvidia.com;
>> ddutile@redhat.com; Linuxarm <linuxarm@huawei.com>; Wangzhou (B)
>> <wangzhou1@hisilicon.com>; jiangkunkun <jiangkunkun@huawei.com>;
>> Jonathan Cameron <jonathan.cameron@huawei.com>;
>> zhangfei.gao@linaro.org
>> Subject: Re: [RFC PATCH 5/5] hw/arm/virt-acpi-build: Add IORT RMR regions
>> to handle MSI nested binding
>>
>> On Fri, Nov 08, 2024 at 12:52:42PM +0000, Shameer Kolothum wrote:
>>> From: Eric Auger <eric.auger@redhat.com>
>>>
>>> To handle SMMUv3 nested stage support it is practical to expose the
>>> guest with reserved memory regions (RMRs) covering the IOVAs used by
>>> the host kernel to map physical MSI doorbells.
>> There has been an ongoing solution for MSI alternative:
>> https://lore.kernel.org/kvm/cover.1731130093.git.nicolinc@nvidia.com/
>>
>> So, I think we should keep this patch out of this series, instead put it on top
>> of the testing branch.
> Yes. I think then we can support DT solution as well.
>
> On that MSI RFC above, have you seen Eric's earlier/initial proposal to bind the Guest MSI in
> nested cases. IIRC, it was providing an IOCTL and then creating a mapping in the host.
>
> I think this is the latest on that.
> https://lore.kernel.org/linux-iommu/20210411114659.15051-4-eric.auger@redhat.com/
yes this is the latest before I stopped my VFIO integration efforts.
>
> But not sure, why we then moved to RMR approach. Eric?
This was indeed the 1st integration approach. Using RMR instead was
suggested by Jean-Philippe and I considered it as simpler (because we
needed the SET_MSI_BINDING ioctl) so I changed the approach.

Thanks

Eric

>
> Thanks,
> Shameer
>
On Thu, Nov 14, 2024 at 11:41:58AM +0100, Eric Auger wrote:
> Hi Shameer,
>
> On 11/14/24 09:48, Shameerali Kolothum Thodi wrote:
> >
> >> -----Original Message-----
> >> From: Nicolin Chen <nicolinc@nvidia.com>
> >> Sent: Wednesday, November 13, 2024 6:31 PM
> >> To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
> >> Cc: qemu-arm@nongnu.org; qemu-devel@nongnu.org;
> >> eric.auger@redhat.com; peter.maydell@linaro.org; jgg@nvidia.com;
> >> ddutile@redhat.com; Linuxarm <linuxarm@huawei.com>; Wangzhou (B)
> >> <wangzhou1@hisilicon.com>; jiangkunkun <jiangkunkun@huawei.com>;
> >> Jonathan Cameron <jonathan.cameron@huawei.com>;
> >> zhangfei.gao@linaro.org
> >> Subject: Re: [RFC PATCH 5/5] hw/arm/virt-acpi-build: Add IORT RMR regions
> >> to handle MSI nested binding
> >>
> >> On Fri, Nov 08, 2024 at 12:52:42PM +0000, Shameer Kolothum wrote:
> >>> From: Eric Auger <eric.auger@redhat.com>
> >>>
> >>> To handle SMMUv3 nested stage support it is practical to expose the
> >>> guest with reserved memory regions (RMRs) covering the IOVAs used by
> >>> the host kernel to map physical MSI doorbells.
> >> There has been an ongoing solution for MSI alternative:
> >> https://lore.kernel.org/kvm/cover.1731130093.git.nicolinc@nvidia.com/
> >>
> >> So, I think we should keep this patch out of this series, instead put it on top
> >> of the testing branch.
> > Yes. I think then we can support DT solution as well.
> >
> > On that MSI RFC above, have you seen Eric's earlier/initial proposal to bind the Guest MSI in
> > nested cases. IIRC, it was providing an IOCTL and then creating a mapping in the host.
> >
> > I think this is the latest on that.
> > https://lore.kernel.org/linux-iommu/20210411114659.15051-4-eric.auger@redhat.com/
> yes this is the latest before I stopped my VFIO integration efforts.
> >
> > But not sure, why we then moved to RMR approach. Eric?
>
> This was indeed the 1st integration approach. Using RMR instead was
> suggested by Jean-Philippe and I considered it as simpler (because we
> needed the SET_MSI_BINDING ioctl) so I changed the approach.

Oh, I didn't realize Eric had this.. Now, Robin wanted it back
(in iommufd though), against the RMR :-/

Nicolin