With the domain_alloc_paging callback supported, the legacy domain_alloc
callback will never be used. Remove it to avoid dead code.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241021085125.192333-3-baolu.lu@linux.intel.com
---
drivers/iommu/intel/iommu.c | 90 -------------------------------------
1 file changed, 90 deletions(-)
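
[Editor's note, for context: a simplified sketch (not the verbatim iommu
core code) of why ->domain_alloc becomes dead once ->domain_alloc_paging
is provided. The core layer tries the per-device paging callback first
and only falls back to the legacy type-based one when it is absent, so
a driver that implements both never has its domain_alloc reached. The
helper name below is hypothetical; the two ops members and their
signatures are the real struct iommu_ops fields.

#include <linux/iommu.h>

static struct iommu_domain *
core_alloc_paging_sketch(const struct iommu_ops *ops, struct device *dev,
			 unsigned int type)
{
	/* Preferred path: per-device paging domain allocation. */
	if (ops->domain_alloc_paging)
		return ops->domain_alloc_paging(dev);

	/* Legacy fallback; unreachable once the callback above exists. */
	if (ops->domain_alloc)
		return ops->domain_alloc(type);

	return NULL;
}
]
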
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0d2237b55b6b..204e624b3f9a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1454,27 +1454,6 @@ static bool first_level_by_default(unsigned int type)
return type != IOMMU_DOMAIN_UNMANAGED;
}
-static struct dmar_domain *alloc_domain(unsigned int type)
-{
- struct dmar_domain *domain;
-
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!domain)
- return NULL;
-
- domain->nid = NUMA_NO_NODE;
- if (first_level_by_default(type))
- domain->use_first_level = true;
- INIT_LIST_HEAD(&domain->devices);
- INIT_LIST_HEAD(&domain->dev_pasids);
- INIT_LIST_HEAD(&domain->cache_tags);
- spin_lock_init(&domain->lock);
- spin_lock_init(&domain->cache_lock);
- xa_init(&domain->iommu_array);
-
- return domain;
-}
-
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
@@ -1546,20 +1525,6 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
spin_unlock(&iommu->lock);
}
-static int guestwidth_to_adjustwidth(int gaw)
-{
- int agaw;
- int r = (gaw - 12) % 9;
-
- if (r == 0)
- agaw = gaw;
- else
- agaw = gaw + 9 - r;
- if (agaw > 64)
- agaw = 64;
- return agaw;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
if (domain->pgd) {
@@ -3381,27 +3346,6 @@ void device_block_translation(struct device *dev)
info->domain = NULL;
}
-static int md_domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- domain->iommu_coherency = false;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
-
- /* always allocate the top pgd */
- domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
@@ -3488,39 +3432,6 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
return domain;
}
-static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
-{
- struct dmar_domain *dmar_domain;
- struct iommu_domain *domain;
-
- switch (type) {
- case IOMMU_DOMAIN_DMA:
- case IOMMU_DOMAIN_UNMANAGED:
- dmar_domain = alloc_domain(type);
- if (!dmar_domain) {
- pr_err("Can't allocate dmar_domain\n");
- return NULL;
- }
- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
- pr_err("Domain initialization failed\n");
- domain_exit(dmar_domain);
- return NULL;
- }
-
- domain = &dmar_domain->domain;
- domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end =
- __DOMAIN_MAX_ADDR(dmar_domain->gaw);
- domain->geometry.force_aperture = true;
-
- return domain;
- default:
- return NULL;
- }
-
- return NULL;
-}
-
static struct iommu_domain *
intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
@@ -4611,7 +4522,6 @@ const struct iommu_ops intel_iommu_ops = {
.identity_domain = &identity_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
- .domain_alloc = intel_iommu_domain_alloc,
.domain_alloc_user = intel_iommu_domain_alloc_user,
.domain_alloc_sva = intel_svm_domain_alloc,
.domain_alloc_paging = intel_iommu_domain_alloc_paging,
--
2.43.0