Go through the domain data persisted in KHO, allocate fresh dmar_domain
structs and populate the structs with the persisted data.
Persisted page table pages in the "mem" field are also claimed to
transfer ownership of the pages from KHO back to the intel-iommu driver.
Once re-hydrated, the struct iommu_domain pointers are inserted into the
persistent_domains xarray so that they can be fetched later when they
need to be restored by iommufd. This will be done in the next commit.
---
drivers/iommu/intel/iommu.c | 9 ++++++-
drivers/iommu/intel/iommu.h | 1 +
drivers/iommu/intel/serialise.c | 44 +++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 0a2118a3b7c4..8e0ed033b03f 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1505,7 +1505,7 @@ static bool first_level_by_default(unsigned int type)
return type != IOMMU_DOMAIN_UNMANAGED;
}
-static struct dmar_domain *alloc_domain(unsigned int type)
+struct dmar_domain *alloc_domain(unsigned int type)
{
struct dmar_domain *domain;
@@ -3468,6 +3468,7 @@ int __init intel_iommu_init(void)
init_no_remapping_devices();
+ intel_iommu_deserialise_kho();
ret = init_dmars();
if (ret) {
if (force_on)
@@ -4127,6 +4128,12 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
+
+ /*
+ * TODO: around here the device should be added to the persistent
+ * domain if it is a persistent device.
+ */
+
if (pdev && pci_ats_supported(pdev)) {
ret = device_rbtree_insert(iommu, info);
if (ret)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index cd932a97a9bc..7ee050ebfaca 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1118,6 +1118,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+struct dmar_domain *alloc_domain(unsigned int type);
void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/serialise.c b/drivers/iommu/intel/serialise.c
index bc755e51732b..20f42b84d490 100644
--- a/drivers/iommu/intel/serialise.c
+++ b/drivers/iommu/intel/serialise.c
@@ -124,7 +124,51 @@ int intel_iommu_serialise_kho(struct notifier_block *self, unsigned long cmd,
}
}
+/*
+ * Walk the subnodes of the "domains" FDT node handed over via KHO and
+ * re-hydrate one dmar_domain per subnode: claim the persisted page table
+ * pages back from KHO, restore pgd/agaw/persistent_id, and publish the
+ * domain in the persistent_domains xarray for later restore by iommufd.
+ *
+ * Malformed nodes are skipped with a warning rather than aborting the
+ * whole walk, so one bad entry does not prevent restoring the rest.
+ */
+static void deserialise_domains(const void *fdt, int root_off)
+{
+	int off;
+
+	fdt_for_each_subnode(off, fdt, root_off) {
+		const struct kho_mem *kho_mems;
+		const unsigned long *pgd_phys;
+		const unsigned long *persistent_id;
+		const int *agaw;
+		struct dmar_domain *dmar_domain;
+		int len, idx, rc;
+
+		/*
+		 * Validate every property before claiming any memory or
+		 * allocating anything. fdt_getprop() returns NULL (and a
+		 * negative len) on error, so the NULL checks also guard the
+		 * claim loop below against a negative "mem" length.
+		 */
+		kho_mems = fdt_getprop(fdt, off, "mem", &len);
+		pgd_phys = fdt_getprop(fdt, off, "pgd", NULL);
+		agaw = fdt_getprop(fdt, off, "agaw", NULL);
+		persistent_id = fdt_getprop(fdt, off, "persistent_id", NULL);
+		if (!kho_mems || !pgd_phys || !agaw || !persistent_id) {
+			pr_warn("Malformed persistent domain node, skipping\n");
+			continue;
+		}
+
+		dmar_domain = alloc_domain(IOMMU_DOMAIN_UNMANAGED);
+		if (!dmar_domain) {
+			pr_warn("No memory for persistent domain %lu\n",
+				*persistent_id);
+			continue;
+		}
+
+		/* Take ownership of the page table pages back from KHO. */
+		for (idx = 0; idx * sizeof(struct kho_mem) < len; ++idx)
+			kho_claim_mem(&kho_mems[idx]);
+
+		dmar_domain->pgd = phys_to_virt(*pgd_phys);
+		dmar_domain->agaw = *agaw;
+		dmar_domain->domain.persistent_id = *persistent_id;
+
+		rc = xa_insert(&persistent_domains, *persistent_id,
+			       &dmar_domain->domain, GFP_KERNEL);
+		if (rc)
+			pr_warn("Unable to re-insert persistent domain %lu\n",
+				*persistent_id);
+	}
+}
+
int __init intel_iommu_deserialise_kho(void)
{
+ const void *fdt = kho_get_fdt();
+ int off;
+
+ if (!fdt)
+ return 0;
+
+ off = fdt_path_offset(fdt, "/intel-iommu");
+ if (off <= 0)
+ return 0; /* No data in KHO */
+
+ deserialise_domains(fdt, fdt_subnode_offset(fdt, off, "domains"));
return 0;
}
--
2.34.1