The AMD vIOMMU virtualizes guest MMIO registers at the 3rd 4K region.
This is achieved using iommufd_viommu_alloc_mmap().
Co-developed-by: Vasant Hegde <Vasant.Hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
drivers/iommu/amd/amd_viommu.h | 5 +++++
drivers/iommu/amd/iommufd.c | 17 ++++++++++++++++-
drivers/iommu/amd/viommu.c | 11 +++++++++++
include/uapi/linux/iommufd.h | 2 ++
4 files changed, 34 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/amd/amd_viommu.h b/drivers/iommu/amd/amd_viommu.h
index 45c2b71af4ba..8dbb12241e8d 100644
--- a/drivers/iommu/amd/amd_viommu.h
+++ b/drivers/iommu/amd/amd_viommu.h
@@ -10,6 +10,7 @@
int amd_viommu_init(struct amd_iommu *iommu);
+u64 amd_viommu_get_vfmmio_addr(struct amd_iommu *iommu, u16 gid);
#else
static inline int amd_viommu_init(struct amd_iommu *iommu)
@@ -17,6 +18,10 @@ static inline int amd_viommu_init(struct amd_iommu *iommu)
return 0;
}
+static inline u64 amd_viommu_get_vfmmio_addr(struct amd_iommu *iommu, u16 gid)
+{
+	return 0;
+}
#endif /* CONFIG_AMD_IOMMU_IOMMUFD */
#endif /* AMD_VIOMMU_H */
diff --git a/drivers/iommu/amd/iommufd.c b/drivers/iommu/amd/iommufd.c
index 6fba5d9b1310..5dcd3fc3ba99 100644
--- a/drivers/iommu/amd/iommufd.c
+++ b/drivers/iommu/amd/iommufd.c
@@ -7,6 +7,7 @@
#include "iommufd.h"
#include "amd_iommu.h"
+#include "amd_viommu.h"
#include "amd_iommu_types.h"
static const struct iommufd_viommu_ops amd_viommu_ops;
@@ -44,9 +45,12 @@ int amd_iommufd_viommu_init(struct iommufd_viommu *viommu, struct iommu_domain *
const struct iommu_user_data *user_data)
{
int ret;
+	phys_addr_t page_base;
+	unsigned long mmap_offset;
unsigned long flags;
struct iommu_viommu_amd data;
struct protection_domain *pdom = to_pdomain(parent);
+	struct amd_iommu *iommu = container_of(viommu->iommu_dev, struct amd_iommu, iommu);
struct amd_iommu_viommu *aviommu = container_of(viommu, struct amd_iommu_viommu, core);
xa_init_flags(&aviommu->gdomid_array, XA_FLAGS_ALLOC1);
@@ -66,11 +70,26 @@ int amd_iommufd_viommu_init(struct iommufd_viommu *viommu, struct iommu_domain *
return aviommu->gid;
pr_debug("%s: gid=%#x", __func__, aviommu->gid);
+	page_base = amd_viommu_get_vfmmio_addr(iommu, aviommu->gid);
+	if (!page_base) {
+		/* gid was allocated above; release it on failure */
+		ret = -ENODEV;
+		goto err_out;
+	}
+
+	ret = iommufd_viommu_alloc_mmap(&aviommu->core,
+					page_base, SZ_4K,
+					&mmap_offset);
+	if (ret)
+		goto err_out;
+
+	data.out_vfmmio_mmap_offset = mmap_offset;
+
ret = iommu_copy_struct_to_user(user_data, &data,
IOMMU_VIOMMU_TYPE_AMD,
reserved);
if (ret)
-	goto err_out;
+	goto free_mmap;
viommu->ops = &amd_viommu_ops;
@@ -79,6 +98,8 @@ int amd_iommufd_viommu_init(struct iommufd_viommu *viommu, struct iommu_domain *
spin_unlock_irqrestore(&pdom->lock, flags);
return 0;
+free_mmap:
+	iommufd_viommu_destroy_mmap(&aviommu->core, mmap_offset);
err_out:
amd_iommu_gid_free(aviommu->gid);
return ret;
diff --git a/drivers/iommu/amd/viommu.c b/drivers/iommu/amd/viommu.c
index 887a9eb8122d..76198bf4f4f6 100644
--- a/drivers/iommu/amd/viommu.c
+++ b/drivers/iommu/amd/viommu.c
@@ -92,6 +92,17 @@ static int __init viommu_vf_vfcntl_init(struct amd_iommu *iommu)
return 0;
}
+/*
+ * Returns VF MMIO BAR offset for the given guest ID, which will be
+ * mapped to guest vIOMMU 3rd 4K MMIO address
+ */
+u64 amd_viommu_get_vfmmio_addr(struct amd_iommu *iommu, u16 gid)
+{
+ /* TODO: Add check for sVIOMMU and set gid[bit 15] */
+ return iommu->vf_base_phys + gid * VIOMMU_VF_MMIO_ENTRY_SIZE;
+}
+EXPORT_SYMBOL_GPL(amd_viommu_get_vfmmio_addr);
+
int __init amd_viommu_init(struct amd_iommu *iommu)
{
int ret;
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 3a2ac7234b9e..eff5fbd2da6b 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -1075,9 +1075,11 @@ struct iommu_viommu_tegra241_cmdqv {
/**
* struct iommu_viommu_amd - AMD vIOMMU Interface (IOMMU_VIOMMU_TYPE_AMD)
+ * @out_vfmmio_mmap_offset: (out) mmap offset for vIOMMU VF-MMIO
* @reserved: Must be zero
*/
struct iommu_viommu_amd {
+ __aligned_u64 out_vfmmio_mmap_offset;
__u32 reserved; /* must be last */
};
--
2.34.1