[PATCH 12/22] iommu/amd: Add helper functions to manage private address region for each VM

Suravee Suthikulpanit posted 22 patches 3 days, 2 hours ago
[PATCH 12/22] iommu/amd: Add helper functions to manage private address region for each VM
Posted by Suravee Suthikulpanit 3 days, 2 hours ago
Some parts of the vIOMMU Private Address (IPA) region (i.e. Guest Device ID
mapping and Guest Domain ID mapping) are managed per-VM during VM create
and destroy.

Introduce helper functions to allocate and map, and to unmap and free, the
per-VM IPA region.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 drivers/iommu/amd/amd_iommu.h |  2 ++
 drivers/iommu/amd/iommu.c     |  4 +--
 drivers/iommu/amd/nested.c    |  1 +
 drivers/iommu/amd/viommu.c    | 52 +++++++++++++++++++++++++++++++++++
 4 files changed, 57 insertions(+), 2 deletions(-)

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 9bf07887b044..b5a54617a9a1 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -100,6 +100,8 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
 				     ioasid_t pasid, u64 address, size_t size);
 
+void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+			  struct iommu_iotlb_gather *gather);
 int amd_iommu_flush_private_vm_region(struct amd_iommu *iommu, struct protection_domain *pdom,
 				      u64 address, size_t size);
 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 8143cf03dcc1..4be3589b9393 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2723,8 +2723,8 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
-static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
-				 struct iommu_iotlb_gather *gather)
+void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+			  struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *dom = to_pdomain(domain);
 	unsigned long flags;
diff --git a/drivers/iommu/amd/nested.c b/drivers/iommu/amd/nested.c
index 66cc36133c8b..c210b8003fd5 100644
--- a/drivers/iommu/amd/nested.c
+++ b/drivers/iommu/amd/nested.c
@@ -291,4 +291,5 @@ static void nested_domain_free(struct iommu_domain *dom)
 static const struct iommu_domain_ops nested_domain_ops = {
 	.attach_dev = nested_attach_device,
 	.free = nested_domain_free,
+	.iotlb_sync = amd_iommu_iotlb_sync,
 };
diff --git a/drivers/iommu/amd/viommu.c b/drivers/iommu/amd/viommu.c
index 27179d5087bc..fbc6b37b2517 100644
--- a/drivers/iommu/amd/viommu.c
+++ b/drivers/iommu/amd/viommu.c
@@ -283,3 +283,75 @@ int __init amd_viommu_init(struct amd_iommu *iommu)
 
 	return 0;
 }
+
+/*
+ * Allocate one per-VM region of the vIOMMU Private Address (IPA) space and
+ * map it into the private protection domain at base + (gid * size).
+ * On success the backing memory is returned via @entry. On failure all
+ * intermediate state (allocation, cache attributes) is rolled back.
+ */
+static int alloc_private_vm_region(struct amd_iommu *iommu, u64 **entry,
+				   u64 base, size_t size, u16 gid)
+{
+	int ret;
+	size_t mapped;
+	u64 addr = base + (gid * size);
+	int nid = iommu->dev ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
+
+	*entry = (void *)iommu_alloc_pages_node_sz(nid, GFP_KERNEL | __GFP_ZERO, size);
+	if (!*entry)
+		return -ENOMEM;
+
+	/* Hardware accesses this region uncached. */
+	ret = set_memory_uc((unsigned long)*entry, size >> PAGE_SHIFT);
+	if (ret)
+		goto out_free;
+
+	pr_debug("%s: entry=%#llx(%#llx), addr=%#llx, size=%#lx\n", __func__,
+		 (unsigned long long)*entry, iommu_virt_to_phys(*entry), addr, size);
+
+	ret = pt_iommu_amdv1_map_pages(&iommu->viommu_pdom->domain, addr,
+				       iommu_virt_to_phys(*entry), PAGE_SIZE, (size / PAGE_SIZE),
+				       IOMMU_PROT_IR | IOMMU_PROT_IW, GFP_KERNEL, &mapped);
+	if (ret)
+		goto out_wb;
+
+	return amd_iommu_flush_private_vm_region(iommu, iommu->viommu_pdom, addr, size);
+
+out_wb:
+	set_memory_wb((unsigned long)*entry, size >> PAGE_SHIFT);
+out_free:
+	iommu_free_pages(*entry);
+	*entry = NULL;
+	return ret;
+}
+
+/*
+ * Unmap and free the per-VM IPA region previously set up by
+ * alloc_private_vm_region(). Restores the cache attributes changed by
+ * set_memory_uc() before returning the pages to the allocator.
+ */
+static void free_private_vm_region(struct amd_iommu *iommu, u64 **entry,
+				   u64 base, size_t size, u16 gid)
+{
+	size_t unmapped;
+	struct iommu_iotlb_gather gather;
+	u64 addr = base + (gid * size);
+
+	if (!iommu || !iommu->viommu_pdom || !*entry)
+		return;
+
+	pr_debug("%s: entry=%#llx(%#llx), base=%#llx, addr=%#llx, size=%#lx\n",
+		 __func__, (unsigned long long)*entry,
+		 iommu_virt_to_phys(*entry), base, addr, size);
+
+	iommu_iotlb_gather_init(&gather);
+	unmapped = pt_iommu_amdv1_unmap_pages(&iommu->viommu_pdom->domain,
+					      addr, PAGE_SIZE, (size / PAGE_SIZE), &gather);
+	if (unmapped)
+		amd_iommu_iotlb_sync(&iommu->viommu_pdom->domain, &gather);
+
+	set_memory_wb((unsigned long)*entry, size >> PAGE_SHIFT);
+	iommu_free_pages(*entry);
+	*entry = NULL;
+}
-- 
2.34.1