Introduce vIOMMU mapping layer in order to support passthrough of IOMMU
devices attached to different physical IOMMUs (e.g. devices with the same streamID).
A new generic vIOMMU API is added: viommu_allocate_free_vid().
This function allocates a new guest vSID and maps it to the input pSID.
Once the mapping is established, the guest uses the vSID for stage-1 commands
and Xen translates vSID->pSID and propagates it towards stage-2.
Introduced naming is generic (vID/pID), since this API could be used
for other IOMMU types in the future.
Implemented usage of the new API for dom0less guests. vSIDs are allocated
on guest device tree creation and the original pSID is
replaced with vSID which shall be used by the guest driver.
Signed-off-by: Milan Djokic <milan_djokic@epam.com>
---
xen/arch/arm/include/asm/viommu.h | 10 ++++
xen/common/device-tree/dom0less-build.c | 32 +++++++++---
xen/drivers/passthrough/arm/viommu.c | 7 +++
xen/drivers/passthrough/arm/vsmmu-v3.c | 67 ++++++++++++++++++++++++-
4 files changed, 106 insertions(+), 10 deletions(-)
diff --git a/xen/arch/arm/include/asm/viommu.h b/xen/arch/arm/include/asm/viommu.h
index 01d4d0dfef..6a2fc56e38 100644
--- a/xen/arch/arm/include/asm/viommu.h
+++ b/xen/arch/arm/include/asm/viommu.h
@@ -33,6 +33,15 @@ struct viommu_ops {
* Called during domain destruction to free resources used by vIOMMU.
*/
int (*relinquish_resources)(struct domain *d);
+
+ /*
+ * Allocate free vSID/vRID for the guest device and establish vID->pID mapping
+ * Called during domain device assignment.
+ * Returns 0 on success and sets vid argument to newly allocated vSID/vRID
+ * mapped to physical ID (id argument).
+ * Negative error code returned if allocation fails.
+ */
+ int (*allocate_free_vid)(struct domain *d, uint32_t id, uint32_t *vid);
};
struct viommu_desc {
@@ -48,6 +57,7 @@ struct viommu_desc {
int domain_viommu_init(struct domain *d, uint16_t viommu_type);
int viommu_relinquish_resources(struct domain *d);
+int viommu_allocate_free_vid(struct domain *d, uint32_t id, uint32_t *vid);
uint16_t viommu_get_type(void);
void add_to_host_iommu_list(paddr_t addr, paddr_t size,
const struct dt_device_node *node);
diff --git a/xen/common/device-tree/dom0less-build.c b/xen/common/device-tree/dom0less-build.c
index 4b74d2f705..f5afdf381c 100644
--- a/xen/common/device-tree/dom0less-build.c
+++ b/xen/common/device-tree/dom0less-build.c
@@ -31,6 +31,8 @@
#include <xen/static-memory.h>
#include <xen/static-shmem.h>
+#include <asm/viommu.h>
+
#define XENSTORE_PFN_LATE_ALLOC UINT64_MAX
static domid_t __initdata xs_domid = DOMID_INVALID;
@@ -318,22 +320,38 @@ static int __init handle_prop_pfdt(struct kernel_info *kinfo,
     return ( propoff != -FDT_ERR_NOTFOUND ) ? propoff : 0;
 }
 
-static void modify_pfdt_node(void *pfdt, int nodeoff)
+#ifdef CONFIG_ARM_VIRTUAL_IOMMU
+static void modify_pfdt_node(void *pfdt, int nodeoff, struct domain *d)
 {
     int proplen, i, rc;
     const fdt32_t *prop;
     fdt32_t *prop_c;
+    uint32_t vsid;
 
-    prop = fdt_getprop(pfdt, nodeoff, "iommus", &proplen);
+    prop = fdt_getprop(pfdt, nodeoff, "iommus", &proplen);
     if ( !prop )
         return;
 
     prop_c = xzalloc_bytes(proplen);
+    if ( !prop_c )
+        return;
+
+    /*
+     * Assign <vIOMMU vSID> pairs to the iommus property and establish
+     * vSID->pSID mappings.
+     */
     for ( i = 0; i < proplen / 8; ++i )
     {
         prop_c[i * 2] = cpu_to_fdt32(GUEST_PHANDLE_VSMMUV3);
-        prop_c[i * 2 + 1] = prop[i * 2 + 1];
+        rc = viommu_allocate_free_vid(d, fdt32_to_cpu(prop[i * 2 + 1]), &vsid);
+        if ( rc )
+        {
+            dprintk(XENLOG_ERR, "Failed to allocate new vSID for iommu device\n");
+            xfree(prop_c);
+            return;
+        }
+        prop_c[i * 2 + 1] = cpu_to_fdt32(vsid);
     }
 
     rc = fdt_setprop(pfdt, nodeoff, "iommus", prop_c, proplen);
@@ -345,11 +358,14 @@ static void modify_pfdt_node(void *pfdt, int nodeoff)
return;
}
+#else
+/* No vIOMMU support: leave the passthrough node's iommus property as-is. */
+static void modify_pfdt_node(void *pfdt, int nodeoff, struct domain *d) {}
+#endif
static int __init scan_pfdt_node(struct kernel_info *kinfo, void *pfdt,
int nodeoff,
uint32_t address_cells, uint32_t size_cells,
- bool scan_passthrough_prop)
+ bool scan_passthrough_prop, struct domain *d)
{
int rc = 0;
void *fdt = kinfo->fdt;
@@ -372,9 +388,9 @@ static int __init scan_pfdt_node(struct kernel_info *kinfo, void *pfdt,
node_next = fdt_first_subnode(pfdt, nodeoff);
while ( node_next > 0 )
{
- modify_pfdt_node(pfdt, node_next);
+ modify_pfdt_node(pfdt, node_next, d);
rc = scan_pfdt_node(kinfo, pfdt, node_next, address_cells, size_cells,
- scan_passthrough_prop);
+ scan_passthrough_prop, d);
if ( rc )
return rc;
@@ -443,7 +459,7 @@ static int __init domain_handle_dtb_boot_module(struct domain *d,
res = scan_pfdt_node(kinfo, pfdt, node_next,
DT_ROOT_NODE_ADDR_CELLS_DEFAULT,
DT_ROOT_NODE_SIZE_CELLS_DEFAULT,
- false);
+ false, d);
if ( res )
goto out;
continue;
@@ -453,7 +469,7 @@ static int __init domain_handle_dtb_boot_module(struct domain *d,
res = scan_pfdt_node(kinfo, pfdt, node_next,
DT_ROOT_NODE_ADDR_CELLS_DEFAULT,
DT_ROOT_NODE_SIZE_CELLS_DEFAULT,
- true);
+ true, d);
if ( res )
goto out;
continue;
diff --git a/xen/drivers/passthrough/arm/viommu.c b/xen/drivers/passthrough/arm/viommu.c
index 5f5892fbb2..4b7837a91f 100644
--- a/xen/drivers/passthrough/arm/viommu.c
+++ b/xen/drivers/passthrough/arm/viommu.c
@@ -71,6 +71,15 @@ int viommu_relinquish_resources(struct domain *d)
     return cur_viommu->ops->relinquish_resources(d);
 }
 
+/* Allocate a free vID mapped to physical ID 'id'; 0 on success. */
+int viommu_allocate_free_vid(struct domain *d, uint32_t id, uint32_t *vid)
+{
+    /* The op is per-type and optional: guard against a NULL handler. */
+    if ( !cur_viommu || !cur_viommu->ops->allocate_free_vid )
+        return -ENODEV;
+
+    return cur_viommu->ops->allocate_free_vid(d, id, vid);
+}
+
uint16_t viommu_get_type(void)
{
if ( !cur_viommu )
diff --git a/xen/drivers/passthrough/arm/vsmmu-v3.c b/xen/drivers/passthrough/arm/vsmmu-v3.c
index 5d0dabd2b2..604f09e980 100644
--- a/xen/drivers/passthrough/arm/vsmmu-v3.c
+++ b/xen/drivers/passthrough/arm/vsmmu-v3.c
@@ -53,6 +53,8 @@ extern const struct viommu_desc __read_mostly *cur_viommu;
#define smmu_get_ste_s1ctxptr(x) FIELD_PREP(STRTAB_STE_0_S1CTXPTR_MASK, \
FIELD_GET(STRTAB_STE_0_S1CTXPTR_MASK, x))
+#define MAX_VSID (1 << SMMU_IDR1_SIDSIZE)
+
/* event queue entry */
struct arm_smmu_evtq_ent {
/* Common fields */
@@ -100,6 +102,14 @@ struct arm_vsmmu_queue {
uint8_t max_n_shift;
};
+/* vSID->pSID mapping entry */
+struct vsid_entry {
+ bool valid;
+ uint32_t vsid;
+ struct host_iommu *phys_smmu;
+ uint32_t psid;
+};
+
struct virt_smmu {
struct domain *d;
struct list_head viommu_list;
@@ -118,6 +128,7 @@ struct virt_smmu {
uint64_t evtq_irq_cfg0;
struct arm_vsmmu_queue evtq, cmdq;
spinlock_t cmd_queue_lock;
+ struct vsid_entry *vsids;
};
/* Queue manipulation functions */
@@ -426,6 +437,29 @@ static int arm_vsmmu_handle_cfgi_ste(struct virt_smmu *smmu, uint64_t *cmdptr)
struct arm_vsmmu_s1_trans_cfg s1_cfg = {0};
uint32_t sid = smmu_cmd_get_sid(cmdptr[0]);
struct iommu_guest_config guest_cfg = {0};
+ uint32_t psid;
+ struct arm_smmu_evtq_ent ent = {
+ .opcode = EVT_ID_BAD_STE,
+ .sid = sid,
+ .c_bad_ste_streamid = {
+ .ssid = 0,
+ .ssv = false,
+ },
+ };
+
+ /* SIDs identity mapped for HW domain */
+ if ( is_hardware_domain(d) )
+ psid = sid;
+ else {
+ /* vSID out of range or not mapped to pSID */
+ if ( sid >= MAX_VSID || !smmu->vsids[sid].valid )
+ {
+ arm_vsmmu_send_event(smmu, &ent);
+ return -EINVAL;
+ }
+
+ psid = smmu->vsids[sid].psid;
+ }
ret = arm_vsmmu_find_ste(smmu, sid, ste);
if ( ret )
@@ -446,7 +480,7 @@ static int arm_vsmmu_handle_cfgi_ste(struct virt_smmu *smmu, uint64_t *cmdptr)
else
guest_cfg.config = ARM_SMMU_DOMAIN_NESTED;
- ret = hd->platform_ops->attach_guest_config(d, sid, &guest_cfg);
+ ret = hd->platform_ops->attach_guest_config(d, psid, &guest_cfg);
if ( ret )
return ret;
@@ -791,6 +825,7 @@ static int vsmmuv3_init_single(struct domain *d, paddr_t addr,
smmu->cmdq.ent_size = CMDQ_ENT_DWORDS * DWORDS_BYTES;
smmu->evtq.q_base = FIELD_PREP(Q_BASE_LOG2SIZE, SMMU_EVTQS);
smmu->evtq.ent_size = EVTQ_ENT_DWORDS * DWORDS_BYTES;
+ smmu->vsids = xzalloc_array(struct vsid_entry, MAX_VSID);
spin_lock_init(&smmu->cmd_queue_lock);
@@ -850,8 +885,9 @@ int vsmmuv3_relinquish_resources(struct domain *d)
if ( list_head_is_null(&d->arch.viommu_list) )
return 0;
- list_for_each_entry_safe(pos, temp, &d->arch.viommu_list, viommu_list )
+ list_for_each_entry_safe(pos, temp, &d->arch.viommu_list, viommu_list)
{
+ xfree(pos->vsids);
list_del(&pos->viommu_list);
xfree(pos);
}
@@ -859,8 +895,39 @@ int vsmmuv3_relinquish_resources(struct domain *d)
     return 0;
 }
 
+/*
+ * Allocate the first free vSID of the domain's first vSMMU and map it to
+ * physical stream ID 'id'.  Returns 0 and sets *vid on success.
+ */
+int vsmmuv3_allocate_free_vid(struct domain *d, uint32_t id, uint32_t *vid)
+{
+    /* uint32_t: a 16-bit counter would wrap before reaching MAX_VSID. */
+    uint32_t i;
+    struct virt_smmu *smmu;
+
+    if ( list_head_is_null(&d->arch.viommu_list) )
+        return -ENODEV;
+
+    smmu = list_first_entry(&d->arch.viommu_list, struct virt_smmu,
+                            viommu_list);
+
+    /* Find the first free vSID slot; bounds check before the array access. */
+    for ( i = 0; i < MAX_VSID && smmu->vsids[i].valid; i++ )
+        ;
+
+    /* Max number of vSIDs already allocated? */
+    if ( i == MAX_VSID )
+        return -ENOMEM;
+
+    /* Establish the vSID->pSID mapping. */
+    smmu->vsids[i].valid = true;
+    smmu->vsids[i].vsid = i;
+    smmu->vsids[i].psid = id;
+    *vid = i;
+
+    return 0;
+}
+
 static const struct viommu_ops vsmmuv3_ops = {
     .domain_init = domain_vsmmuv3_init,
+    .allocate_free_vid = vsmmuv3_allocate_free_vid,
     .relinquish_resources = vsmmuv3_relinquish_resources,
 };
--
2.43.0