Add a function that generates a Virtual I/O Translation table (VIOT),
describing the topology of paravirtual IOMMUs. The table is created when
instantiating a virtio-iommu device. It contains a virtio-iommu node and
PCI Range nodes for endpoints managed by the IOMMU. By default, a single
node describes all PCI devices. When passing the "default_bus_bypass_iommu"
machine option and "bypass_iommu" PXB option, only buses that do not
bypass the IOMMU are described by PCI Range nodes.
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
hw/acpi/Kconfig | 4 ++
hw/acpi/viot.h | 13 +++++
hw/acpi/viot.c | 112 ++++++++++++++++++++++++++++++++++++++++++++
hw/acpi/meson.build | 1 +
4 files changed, 130 insertions(+)
create mode 100644 hw/acpi/viot.h
create mode 100644 hw/acpi/viot.c
diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig
index cfc4ede8d9..abad79c103 100644
--- a/hw/acpi/Kconfig
+++ b/hw/acpi/Kconfig
@@ -41,6 +41,10 @@ config ACPI_VMGENID
    default y
    depends on PC

+config ACPI_VIOT
+    bool
+    depends on ACPI
+
config ACPI_HW_REDUCED
    bool
    select ACPI
diff --git a/hw/acpi/viot.h b/hw/acpi/viot.h
new file mode 100644
index 0000000000..4cef29a640
--- /dev/null
+++ b/hw/acpi/viot.h
@@ -0,0 +1,13 @@
+/*
+ * ACPI Virtual I/O Translation Table implementation
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef VIOT_H
+#define VIOT_H
+
+void build_viot(GArray *table_data, BIOSLinker *linker,
+                uint16_t virtio_iommu_bdf, const char *oem_id,
+                const char *oem_table_id);
+
+#endif /* VIOT_H */
diff --git a/hw/acpi/viot.c b/hw/acpi/viot.c
new file mode 100644
index 0000000000..81bb2e93ff
--- /dev/null
+++ b/hw/acpi/viot.c
@@ -0,0 +1,112 @@
+/*
+ * ACPI Virtual I/O Translation table implementation
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include "qemu/osdep.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/viot.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+
+struct viot_pci_ranges {
+    GArray *blob;
+    size_t count;
+    uint16_t output_node;
+};
+
+/* Build PCI range for a given PCI host bridge */
+static int viot_host_bridges(Object *obj, void *opaque)
+{
+    struct viot_pci_ranges *pci_ranges = opaque;
+    GArray *blob = pci_ranges->blob;
+
+    if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
+        PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
+
+        if (bus && !pci_bus_bypass_iommu(bus)) {
+            int min_bus, max_bus;
+
+            pci_bus_range(bus, &min_bus, &max_bus);
+
+            /* Type */
+            build_append_int_noprefix(blob, ACPI_VIOT_NODE_PCI_RANGE, 1);
+            /* Reserved */
+            build_append_int_noprefix(blob, 0, 1);
+            /* Length */
+            build_append_int_noprefix(blob, sizeof(AcpiViotPciRange), 2);
+            /* Endpoint start */
+            build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 4);
+            /* PCI Segment start */
+            build_append_int_noprefix(blob, 0, 2);
+            /* PCI Segment end */
+            build_append_int_noprefix(blob, 0, 2);
+            /* PCI BDF start */
+            build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 2);
+            /* PCI BDF end */
+            build_append_int_noprefix(blob, PCI_BUILD_BDF(max_bus, 0xff), 2);
+            /* Output node */
+            build_append_int_noprefix(blob, pci_ranges->output_node, 2);
+            /* Reserved */
+            build_append_int_noprefix(blob, 0, 6);
+
+            pci_ranges->count++;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI
+ * endpoints.
+ */
+void build_viot(GArray *table_data, BIOSLinker *linker,
+                uint16_t virtio_iommu_bdf, const char *oem_id,
+                const char *oem_table_id)
+{
+    /* virtio-iommu node follows the header */
+    int viommu_off = sizeof(AcpiViot);
+    int viot_start = table_data->len;
+    struct viot_pci_ranges pci_ranges = {
+        .output_node = viommu_off,
+        .blob = g_array_new(false, true, 1),
+    };
+
+    /* Build the list of PCI ranges that this viommu manages */
+    object_child_foreach_recursive(object_get_root(), viot_host_bridges,
+                                   &pci_ranges);
+
+    /* VIOT header */
+    acpi_data_push(table_data, sizeof(AcpiTableHeader));
+    /* Node count */
+    build_append_int_noprefix(table_data, pci_ranges.count + 1, 2);
+    /* Node offset */
+    build_append_int_noprefix(table_data, viommu_off, 2);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 8);
+
+    /* Virtio-iommu based on virtio-pci */
+    /* Type */
+    build_append_int_noprefix(table_data, ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI, 1);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 1);
+    /* Length */
+    build_append_int_noprefix(table_data, sizeof(AcpiViotVirtioIommuPci), 2);
+    /* PCI Segment */
+    build_append_int_noprefix(table_data, 0, 2);
+    /* PCI BDF number */
+    build_append_int_noprefix(table_data, virtio_iommu_bdf, 2);
+    /* Reserved */
+    build_append_int_noprefix(table_data, 0, 8);
+
+    /* PCI ranges found above */
+    g_array_append_vals(table_data, pci_ranges.blob->data,
+                        pci_ranges.blob->len);
+    g_array_free(pci_ranges.blob, true);
+
+    build_header(linker, table_data, (void *)(table_data->data + viot_start),
+                 "VIOT", table_data->len - viot_start, 0, oem_id, oem_table_id);
+}
+
diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
index 29f804d13e..a510988b27 100644
--- a/hw/acpi/meson.build
+++ b/hw/acpi/meson.build
@@ -16,6 +16,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device
acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
+acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
--
2.33.0
Hi Jean,
On 9/3/21 4:32 PM, Jean-Philippe Brucker wrote:
> Add a function that generates a Virtual I/O Translation table (VIOT),
> describing the topology of paravirtual IOMMUs. The table is created when
> instantiating a virtio-iommu device. It contains a virtio-iommu node and
> PCI Range nodes for endpoints managed by the IOMMU. By default, a single
> node describes all PCI devices. When passing the "default_bus_bypass_iommu"
> machine option and "bypass_iommu" PXB option, only buses that do not
> bypass the IOMMU are described by PCI Range nodes.
You may consider adding a test for the new VIOT table (however this may
be added afterwards). See tests/qtest/bios-tables-test.c for the process
and examples.
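Something along these lines might be enough (rough, untested sketch for
tests/qtest/bios-tables-test.c; the test name is made up, and you would
also need the expected VIOT blob under tests/data/acpi/q35/):

static void test_acpi_q35_viot(void)
{
    test_data data = {
        .machine = MACHINE_Q35,
        .variant = ".viot",
    };

    /* A single virtio-iommu-pci device should be enough to get a VIOT */
    test_acpi_one("-device virtio-iommu-pci", &data);
    free_test_data(&data);
}

plus a qtest_add_func("acpi/q35/viot", test_acpi_q35_viot) registration
in main().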
>
> Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
> hw/acpi/Kconfig | 4 ++
> hw/acpi/viot.h | 13 +++++
> hw/acpi/viot.c | 112 ++++++++++++++++++++++++++++++++++++++++++++
> hw/acpi/meson.build | 1 +
> 4 files changed, 130 insertions(+)
> create mode 100644 hw/acpi/viot.h
> create mode 100644 hw/acpi/viot.c
>
> diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig
> index cfc4ede8d9..abad79c103 100644
> --- a/hw/acpi/Kconfig
> +++ b/hw/acpi/Kconfig
> @@ -41,6 +41,10 @@ config ACPI_VMGENID
> default y
> depends on PC
>
> +config ACPI_VIOT
> + bool
> + depends on ACPI
> +
> config ACPI_HW_REDUCED
> bool
> select ACPI
> diff --git a/hw/acpi/viot.h b/hw/acpi/viot.h
> new file mode 100644
> index 0000000000..4cef29a640
> --- /dev/null
> +++ b/hw/acpi/viot.h
> @@ -0,0 +1,13 @@
> +/*
> + * ACPI Virtual I/O Translation Table implementation
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +#ifndef VIOT_H
> +#define VIOT_H
> +
> +void build_viot(GArray *table_data, BIOSLinker *linker,
> + uint16_t virtio_iommu_bdf, const char *oem_id,
> + const char *oem_table_id);
> +
> +#endif /* VIOT_H */
> diff --git a/hw/acpi/viot.c b/hw/acpi/viot.c
> new file mode 100644
> index 0000000000..81bb2e93ff
> --- /dev/null
> +++ b/hw/acpi/viot.c
> @@ -0,0 +1,112 @@
> +/*
> + * ACPI Virtual I/O Translation table implementation
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +#include "qemu/osdep.h"
> +#include "hw/acpi/acpi.h"
> +#include "hw/acpi/aml-build.h"
> +#include "hw/acpi/viot.h"
> +#include "hw/pci/pci.h"
> +#include "hw/pci/pci_host.h"
> +
> +struct viot_pci_ranges {
> + GArray *blob;
> + size_t count;
> + uint16_t output_node;
> +};
> +
> +/* Build PCI range for a given PCI host bridge */
This corresponds to chapter 1.3 of the spec.
> +static int viot_host_bridges(Object *obj, void *opaque)
> +{
> + struct viot_pci_ranges *pci_ranges = opaque;
> + GArray *blob = pci_ranges->blob;
> +
> + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
> + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;
> +
> + if (bus && !pci_bus_bypass_iommu(bus)) {
> + int min_bus, max_bus;
> +
> + pci_bus_range(bus, &min_bus, &max_bus);
> +
> + /* Type */
> + build_append_int_noprefix(blob, ACPI_VIOT_NODE_PCI_RANGE, 1);
s/ACPI_VIOT_NODE_PCI_RANGE/1
> + /* Reserved */
> + build_append_int_noprefix(blob, 0, 1);
> + /* Length */
> + build_append_int_noprefix(blob, sizeof(AcpiViotPciRange), 2);
hardcode the size
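i.e., if my count of the fields below is right (1 + 1 + 2 + 4 + 2 + 2 +
2 + 2 + 2 + 6 = 24 bytes), something like:

            /* Length */
            build_append_int_noprefix(blob, 24, 2);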
> + /* Endpoint start */
> + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 4);
> + /* PCI Segment start */
> + build_append_int_noprefix(blob, 0, 2);
> + /* PCI Segment end */
> + build_append_int_noprefix(blob, 0, 2);
> + /* PCI BDF start */
> + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 2);
> + /* PCI BDF end */
> + build_append_int_noprefix(blob, PCI_BUILD_BDF(max_bus, 0xff), 2);
> + /* Output node */
> + build_append_int_noprefix(blob, pci_ranges->output_node, 2);
> + /* Reserved */
> + build_append_int_noprefix(blob, 0, 6);
> +
> + pci_ranges->count++;
> + }
> + }
> +
> + return 0;
> +}
> +
> +/*
> + * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI
> + * endpoints.
Add a link to the doc + Table refs?
https://jpbrucker.net/virtio-iommu/viot/viot-v9.pdf
> + */
> +void build_viot(GArray *table_data, BIOSLinker *linker,
> + uint16_t virtio_iommu_bdf, const char *oem_id,
> + const char *oem_table_id)
> +{
> + /* virtio-iommu node follows the header */
> + int viommu_off = sizeof(AcpiViot);
> + int viot_start = table_data->len;
> + struct viot_pci_ranges pci_ranges = {
> + .output_node = viommu_off,
> + .blob = g_array_new(false, true, 1),
> + };
> +
> + /* Build the list of PCI ranges that this viommu manages */
> + object_child_foreach_recursive(object_get_root(), viot_host_bridges,
> + &pci_ranges);
> +
> + /* VIOT header */
> + acpi_data_push(table_data, sizeof(AcpiTableHeader));
Depending on the order of landing, this may be replaced by Igor's
acpi_init_table(), used together with acpi_table_composed().
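Roughly (just a sketch; assuming those helpers keep taking an AcpiTable
descriptor plus the data array, as in the latest revision of that series,
and that revision 0 is still what we want for VIOT):

    AcpiTable table = { .sig = "VIOT", .rev = 0,
                        .oem_id = oem_id, .oem_table_id = oem_table_id };

    acpi_init_table(&table, table_data);
    /* ... node count, node offset and the nodes below, unchanged ... */
    acpi_table_composed(linker, &table);

which would also replace the build_header() call at the end.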
> + /* Node count */
> + build_append_int_noprefix(table_data, pci_ranges.count + 1, 2);
> + /* Node offset */
> + build_append_int_noprefix(table_data, viommu_off, 2);
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 8);
> +
> + /* Virtio-iommu based on virtio-pci */
This corresponds to chapter 1.1 of the spec. By the way, it may be worth adding table titles to the spec.
> + /* Type */
> + build_append_int_noprefix(table_data, ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI, 1);
s/ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI/3 or put the defines here instead of
in the header.
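For instance (names are just a suggestion, the values being the 1 and 3
mentioned above):

enum viot_node_type {
    VIOT_NODE_PCI_RANGE = 1,
    VIOT_NODE_VIRTIO_IOMMU_PCI = 3,
};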
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 1);
> + /* Length */
> + build_append_int_noprefix(table_data, sizeof(AcpiViotVirtioIommuPci), 2);
hardcode the size of AcpiViotVirtioIommuPci
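i.e., if my count of the fields below is right (1 + 1 + 2 + 2 + 2 + 8 =
16 bytes):

    /* Length */
    build_append_int_noprefix(table_data, 16, 2);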
> + /* PCI Segment */
> + build_append_int_noprefix(table_data, 0, 2);
> + /* PCI BDF number */
> + build_append_int_noprefix(table_data, virtio_iommu_bdf, 2);
> + /* Reserved */
> + build_append_int_noprefix(table_data, 0, 8);
> +
> + /* PCI ranges found above */
> + g_array_append_vals(table_data, pci_ranges.blob->data,
> + pci_ranges.blob->len);
> + g_array_free(pci_ranges.blob, true);
> +
> + build_header(linker, table_data, (void *)(table_data->data + viot_start),
> + "VIOT", table_data->len - viot_start, 0, oem_id, oem_table_id);
acpi_table_composed(), or whatever its final name ends up being
> +}
> +
> diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
> index 29f804d13e..a510988b27 100644
> --- a/hw/acpi/meson.build
> +++ b/hw/acpi/meson.build
> @@ -16,6 +16,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device
> acpi_ss.add(when: 'CONFIG_ACPI_HMAT', if_true: files('hmat.c'))
> acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files('ghes-stub.c'))
> acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
> +acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
There is now a conflict with main, unless I messed something up:
<<<<<<< HEAD
acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false:
files('acpi-pci-hotplug-stub.c'))
=======
acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
>>>>>>> hw/acpi: Add VIOT table
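I guess the resolution simply keeps the lines from main and adds the
VIOT one, i.e.:

acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))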
Thanks
Eric
> acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c'))
> acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c'))
> acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c'))
On Mon, Sep 06, 2021 at 02:58:55PM +0200, Eric Auger wrote:
> Hi Jean,
>
> On 9/3/21 4:32 PM, Jean-Philippe Brucker wrote:
> > Add a function that generates a Virtual I/O Translation table (VIOT),
> > describing the topology of paravirtual IOMMUs. The table is created when
> > instantiating a virtio-iommu device. It contains a virtio-iommu node and
> > PCI Range nodes for endpoints managed by the IOMMU. By default, a single
> > node describes all PCI devices. When passing the "default_bus_bypass_iommu"
> > machine option and "bypass_iommu" PXB option, only buses that do not
> > bypass the IOMMU are described by PCI Range nodes.
>
> You may consider adding a test for the new VIOT table (however this may
> be added afterwards). See tests/qtest/bios-tables-test.c for the process
> and examples.
Sure, it adds 5 patches to the series
> > +/*
> > + * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI
> > + * endpoints.
> Add a link to the doc + Table refs?
>
> https://jpbrucker.net/virtio-iommu/viot/viot-v9.pdf
No, this document is only temporary until the next ACPI release. Although
the titles and text should stay the same, I don't know what the references
will be in the final document; they will be different.
>
> > + */
> > +void build_viot(GArray *table_data, BIOSLinker *linker,
> > + uint16_t virtio_iommu_bdf, const char *oem_id,
> > + const char *oem_table_id)
> > +{
> > + /* virtio-iommu node follows the header */
> > + int viommu_off = sizeof(AcpiViot);
> > + int viot_start = table_data->len;
> > + struct viot_pci_ranges pci_ranges = {
> > + .output_node = viommu_off,
> > + .blob = g_array_new(false, true, 1),
> > + };
> > +
> > + /* Build the list of PCI ranges that this viommu manages */
> > + object_child_foreach_recursive(object_get_root(), viot_host_bridges,
> > + &pci_ranges);
> > +
> > + /* VIOT header */
> > + acpi_data_push(table_data, sizeof(AcpiTableHeader));
> Depending on the order of landing, this may be replaced by Igor's
> acpi_init_table(), used together with acpi_table_composed().
OK, I'll prepare that as a fixup, and either send it separately or squash
it after the ACPI series lands.
> <<<<<<< HEAD
> acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c'))
> acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c'))
> acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false:
> files('acpi-pci-hotplug-stub.c'))
> =======
> acpi_ss.add(when: 'CONFIG_ACPI_X86', if_true: files('piix4.c', 'pcihp.c'))
> acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c'))
> >>>>>>> hw/acpi: Add VIOT table
Indeed, fixed
Thanks,
Jean