IOMMU page table pages are allocated and freed through APIs that operate on
struct ioptdesc. Add helper functions that preserve and unpreserve these pages
through Kexec Handover (KHO), and that restore them, including their memory
accounting, in the new kernel for IOMMU live update.
Signed-off-by: Samiullah Khawaja <skhawaja@google.com>
---
drivers/iommu/iommu-pages.c | 74 +++++++++++++++++++++++++++++++++++++
drivers/iommu/iommu-pages.h | 30 +++++++++++++++
2 files changed, 104 insertions(+)
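
Not part of the patch: a rough sketch of how a driver could use these helpers
around a live update. struct my_domain, its pgd/pgd_phys fields and the
my_domain_*() functions are made up for illustration; only the iommu_*()
helpers added here and virt_to_phys()/phys_to_virt() are real. A real driver
would also preserve every lower-level table page (or batch them with
iommu_preserve_pages() on a pages list) and carry pgd_phys to the new kernel
through whatever state it serializes for the live update.

#include <linux/io.h>		/* virt_to_phys()/phys_to_virt() */
#include "iommu-pages.h"

/* Hypothetical driver state, for illustration only. */
struct my_domain {
	void *pgd;	/* top-level table from iommu_alloc_pages_node_sz() */
	u64 pgd_phys;	/* handed to the new kernel out of band */
};

/* Old kernel, before kexec: mark the table page as preserved via KHO. */
static int my_domain_preserve(struct my_domain *dom)
{
	int ret;

	ret = iommu_preserve_page(dom->pgd);
	if (ret)
		return ret;

	dom->pgd_phys = virt_to_phys(dom->pgd);
	return 0;
}

/* New kernel, after kexec: take the preserved table back and re-account it. */
static void my_domain_restore(struct my_domain *dom)
{
	iommu_restore_page(dom->pgd_phys);
	dom->pgd = phys_to_virt(dom->pgd_phys);
}

If the live update is aborted before the kexec, iommu_unpreserve_page(dom->pgd)
undoes the preservation.
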
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
index 3bab175d8557..588a8f19b196 100644
--- a/drivers/iommu/iommu-pages.c
+++ b/drivers/iommu/iommu-pages.c
@@ -6,6 +6,7 @@
#include "iommu-pages.h"
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
+#include <linux/kexec_handover.h>
#include <linux/mm.h>
#define IOPTDESC_MATCH(pg_elm, elm) \
@@ -131,6 +132,79 @@ void iommu_put_pages_list(struct iommu_pages_list *list)
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
+#if IS_ENABLED(CONFIG_IOMMU_LIVEUPDATE)
+void iommu_unpreserve_page(void *virt)
+{
+ kho_unpreserve_folio(ioptdesc_folio(virt_to_ioptdesc(virt)));
+}
+EXPORT_SYMBOL_GPL(iommu_unpreserve_page);
+
+int iommu_preserve_page(void *virt)
+{
+ return kho_preserve_folio(ioptdesc_folio(virt_to_ioptdesc(virt)));
+}
+EXPORT_SYMBOL_GPL(iommu_preserve_page);
+
+void iommu_unpreserve_pages(struct iommu_pages_list *list, int count)
+{
+ struct ioptdesc *iopt;
+
+ if (!count)
+ return;
+
+	/* A negative count means unpreserve every page on the list. */
+ if (count < 0)
+ count = 0;
+
+ list_for_each_entry(iopt, &list->pages, iopt_freelist_elm) {
+ kho_unpreserve_folio(ioptdesc_folio(iopt));
+ if (count > 0 && --count == 0)
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_unpreserve_pages);
+
+void iommu_restore_page(u64 phys)
+{
+	struct folio *folio;
+	unsigned long pgcnt;
+	unsigned int order;
+
+	/* The physical range must have been preserved by the old kernel. */
+	folio = kho_restore_folio(phys);
+	BUG_ON(!folio);
+
+	order = folio_order(folio);
+	pgcnt = 1UL << order;
+
+	/* Account the restored pages as IOMMU page table memory again. */
+	mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, pgcnt);
+	lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, pgcnt);
+}
+EXPORT_SYMBOL_GPL(iommu_restore_page);
+
+int iommu_preserve_pages(struct iommu_pages_list *list)
+{
+ struct ioptdesc *iopt;
+ int count = 0;
+ int ret;
+
+ list_for_each_entry(iopt, &list->pages, iopt_freelist_elm) {
+ ret = kho_preserve_folio(ioptdesc_folio(iopt));
+ if (ret) {
+ iommu_unpreserve_pages(list, count);
+ return ret;
+ }
+
+ ++count;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_preserve_pages);
+
+#endif
+
/**
* iommu_pages_start_incoherent - Setup the page for cache incoherent operation
* @virt: The page to setup
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index ae9da4f571f6..bd336fb56b5f 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -53,6 +53,36 @@ void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size);
void iommu_free_pages(void *virt);
void iommu_put_pages_list(struct iommu_pages_list *list);
+#if IS_ENABLED(CONFIG_IOMMU_LIVEUPDATE)
+int iommu_preserve_page(void *virt);
+void iommu_unpreserve_page(void *virt);
+int iommu_preserve_pages(struct iommu_pages_list *list);
+void iommu_unpreserve_pages(struct iommu_pages_list *list, int count);
+void iommu_restore_page(u64 phys);
+#else
+static inline int iommu_preserve_page(void *virt)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommu_unpreserve_page(void *virt)
+{
+}
+
+static inline int iommu_preserve_pages(struct iommu_pages_list *list)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommu_unpreserve_pages(struct iommu_pages_list *list, int count)
+{
+}
+
+static inline void iommu_restore_page(u64 phys)
+{
+}
+#endif
+
/**
* iommu_pages_list_add - add the page to a iommu_pages_list
* @list: List to add the page to
--
2.53.0.rc2.204.g2597b5adb4-goog