Define clear_user_highpages(), which clears a range of pages sequentially
using the single-page variant clear_user_highpage().
With !CONFIG_HIGHMEM, pages are contiguous so use the range clearing
primitive clear_user_pages().
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
---
include/linux/highmem.h | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index abc20f9810fd..c7ca88ce9937 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -199,6 +199,11 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
+/**
+ * clear_user_highpage() - clear a page to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
@@ -207,6 +212,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif
+/**
+ * clear_user_highpages() - clear a page range to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ * @npages: number of pages (zero is a no-op)
+ *
+ * Assumes that all the pages in the region (@page, +@npages) are valid
+ * so this does no exception handling.
+ */
+static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
+					unsigned int npages)
+{
+	if (!IS_ENABLED(CONFIG_HIGHMEM)) {
+		clear_user_pages(page_address(page), vaddr, page, npages);
+		return;
+	}
+
+	while (npages--) {
+		clear_user_highpage(page, vaddr);
+		vaddr += PAGE_SIZE;
+		page++;
+	}
+}
+
#ifndef vma_alloc_zeroed_movable_folio
/**
* vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
--
2.31.1