From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
There are three originators of PCI configuration space accesses:
1. The domain that owns the physical host bridge: MMIO handlers are
in place so we can update the vPCI register handlers with the values
written by the hardware domain, i.e. keep the physical view of the
registers in sync with the guest's view of the configuration space.
2. Guest access to passed through PCI devices: we need to properly
map the virtual bus topology to the physical one, i.e. forward the
configuration space accesses to the corresponding physical devices.
3. Emulated host PCI bridge access: the emulated bridge doesn't exist
in the physical topology, i.e. it can't be mapped to any physical host
bridge, so all accesses to the host bridge itself need to be trapped
and emulated.
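For reference, the BDF decode relies on the standard ECAM layout, where
each function owns a 4K window of configuration space (bits 27:20 of
the window offset carry the bus, 19:15 the device, 14:12 the function,
11:0 the register). A minimal standalone sketch of that decode, assuming
VPCI_ECAM_BDF extracts bits 27:12 as in the standard layout; the
decode_ecam_offset() helper below is hypothetical and for illustration
only:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an offset into the ECAM window into bus/device/function. */
    static void decode_ecam_offset(uint32_t off)
    {
        /* Bits 27:12 carry the BDF; bits 11:0 the register offset. */
        uint16_t bdf = (off & 0x0ffff000U) >> 12;

        printf("bus %02x dev %02x fn %x reg %03x\n",
               bdf >> 8,          /* bits 15:8 of the BDF: bus */
               (bdf >> 3) & 0x1f, /* bits 7:3: device (slot) */
               bdf & 0x7,         /* bits 2:0: function */
               off & 0xfffU);     /* register within the function */
    }

    int main(void)
    {
        /* Bus 0, device 3, function 0, register 0x10 (BAR0). */
        decode_ecam_offset((0U << 20) | (3U << 15) | (0U << 12) | 0x10U);
        return 0;
    }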
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Volodymyr Babchuk <volodymyr_babchuk@epam.com>
Signed-off-by: Stewart Hildebrand <stewart.hildebrand@amd.com>
---
In v11:
- Fixed format issues
- Added ASSERT_UNREACHABLE() to the dummy implementation of
vpci_translate_virtual_device()
- Moved variable in vpci_sbdf_from_gpa(), now it is easier to follow
the logic in the function
Since v9:
- Comment about required lock replaced with ASSERT()
- Style fixes
- call to vpci_translate_virtual_device folded into vpci_sbdf_from_gpa
Since v8:
- locks moved out of vpci_translate_virtual_device()
Since v6:
- add pcidevs locking to vpci_translate_virtual_device
- update wrt the new locking scheme
Since v5:
- add vpci_translate_virtual_device for #ifndef CONFIG_HAS_VPCI_GUEST_SUPPORT
case to simplify ifdefery
- add ASSERT(!is_hardware_domain(d)); to vpci_translate_virtual_device
- reset output register on failed virtual SBDF translation
Since v4:
- indentation fixes
- constify struct domain
- updated commit message
- updates to the new locking scheme (pdev->vpci_lock)
Since v3:
- revisit locking
- move code to vpci.c
Since v2:
- pass struct domain instead of struct vcpu
- constify arguments where possible
- gate relevant code with CONFIG_HAS_VPCI_GUEST_SUPPORT
New in v2
---
xen/arch/arm/vpci.c | 47 +++++++++++++++++++++++++++++++----------
xen/drivers/vpci/vpci.c | 24 +++++++++++++++++++++
xen/include/xen/vpci.h | 12 +++++++++++
3 files changed, 72 insertions(+), 11 deletions(-)
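A note on the locking discipline that the "locks moved out" changelog
entries refer to: the caller, not vpci_translate_virtual_device(), takes
the d->pci_lock read lock. A minimal sketch mirroring the call made from
vpci_sbdf_from_gpa() below; translate_for_guest() itself is hypothetical:

    static bool translate_for_guest(struct domain *d, pci_sbdf_t *sbdf)
    {
        bool ok;

        read_lock(&d->pci_lock);    /* protects the domain's pdev list */
        ok = vpci_translate_virtual_device(d, sbdf);
        read_unlock(&d->pci_lock);

        return ok;
    }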
diff --git a/xen/arch/arm/vpci.c b/xen/arch/arm/vpci.c
index 3bc4bb55082a..7a6a0017d132 100644
--- a/xen/arch/arm/vpci.c
+++ b/xen/arch/arm/vpci.c
@@ -7,31 +7,51 @@
#include <asm/mmio.h>
-static pci_sbdf_t vpci_sbdf_from_gpa(const struct pci_host_bridge *bridge,
- paddr_t gpa)
+static bool vpci_sbdf_from_gpa(struct domain *d,
+ const struct pci_host_bridge *bridge,
+ paddr_t gpa, pci_sbdf_t *sbdf)
{
- pci_sbdf_t sbdf;
+ bool translated = true;
+
+ ASSERT(sbdf);
if ( bridge )
{
- sbdf.sbdf = VPCI_ECAM_BDF(gpa - bridge->cfg->phys_addr);
- sbdf.seg = bridge->segment;
- sbdf.bus += bridge->cfg->busn_start;
+ sbdf->sbdf = VPCI_ECAM_BDF(gpa - bridge->cfg->phys_addr);
+ sbdf->seg = bridge->segment;
+ sbdf->bus += bridge->cfg->busn_start;
}
else
- sbdf.sbdf = VPCI_ECAM_BDF(gpa - GUEST_VPCI_ECAM_BASE);
+ {
+ /*
+ * For the passed through devices we need to map their virtual SBDF
+ * to the physical PCI device being passed through.
+ */
+ sbdf->sbdf = VPCI_ECAM_BDF(gpa - GUEST_VPCI_ECAM_BASE);
+ read_lock(&d->pci_lock);
+ translated = vpci_translate_virtual_device(d, sbdf);
+ read_unlock(&d->pci_lock);
+ }
- return sbdf;
+ return translated;
}
static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
register_t *r, void *p)
{
struct pci_host_bridge *bridge = p;
- pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
+ pci_sbdf_t sbdf;
/* data is needed to prevent a pointer cast on 32bit */
unsigned long data;
+ ASSERT(!bridge == !is_hardware_domain(v->domain));
+
+ if ( !vpci_sbdf_from_gpa(v->domain, bridge, info->gpa, &sbdf) )
+ {
+ *r = ~0UL;
+ return 1;
+ }
+
if ( vpci_ecam_read(sbdf, ECAM_REG_OFFSET(info->gpa),
1U << info->dabt.size, &data) )
{
@@ -39,7 +59,7 @@ static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
return 1;
}
- *r = ~0ul;
+ *r = ~0UL;
return 0;
}
@@ -48,7 +68,12 @@ static int vpci_mmio_write(struct vcpu *v, mmio_info_t *info,
register_t r, void *p)
{
struct pci_host_bridge *bridge = p;
- pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
+ pci_sbdf_t sbdf;
+
+ ASSERT(!bridge == !is_hardware_domain(v->domain));
+
+ if ( !vpci_sbdf_from_gpa(v->domain, bridge, info->gpa, &sbdf) )
+ return 1;
return vpci_ecam_write(sbdf, ECAM_REG_OFFSET(info->gpa),
1U << info->dabt.size, r);
diff --git a/xen/drivers/vpci/vpci.c b/xen/drivers/vpci/vpci.c
index 23722634d50b..98b294f09688 100644
--- a/xen/drivers/vpci/vpci.c
+++ b/xen/drivers/vpci/vpci.c
@@ -81,6 +81,30 @@ static int add_virtual_device(struct pci_dev *pdev)
return 0;
}
+/*
+ * Find the physical device which is mapped to the virtual device
+ * and translate virtual SBDF to the physical one.
+ */
+bool vpci_translate_virtual_device(const struct domain *d, pci_sbdf_t *sbdf)
+{
+ const struct pci_dev *pdev;
+
+ ASSERT(!is_hardware_domain(d));
+ ASSERT(rw_is_locked(&d->pci_lock));
+
+ for_each_pdev ( d, pdev )
+ {
+ if ( pdev->vpci && (pdev->vpci->guest_sbdf.sbdf == sbdf->sbdf) )
+ {
+ /* Replace guest SBDF with the physical one. */
+ *sbdf = pdev->sbdf;
+ return true;
+ }
+ }
+
+ return false;
+}
+
#endif /* CONFIG_HAS_VPCI_GUEST_SUPPORT */
void vpci_deassign_device(struct pci_dev *pdev)
diff --git a/xen/include/xen/vpci.h b/xen/include/xen/vpci.h
index 980aded26fc1..7e5a0f0c50c1 100644
--- a/xen/include/xen/vpci.h
+++ b/xen/include/xen/vpci.h
@@ -303,6 +303,18 @@ static inline bool __must_check vpci_process_pending(struct vcpu *v)
}
#endif
+#ifdef CONFIG_HAS_VPCI_GUEST_SUPPORT
+bool vpci_translate_virtual_device(const struct domain *d, pci_sbdf_t *sbdf);
+#else
+static inline bool vpci_translate_virtual_device(const struct domain *d,
+ pci_sbdf_t *sbdf)
+{
+ ASSERT_UNREACHABLE();
+
+ return false;
+}
+#endif
+
#endif
/*
--
2.43.2