Define clear_user_highpages() which clears pages sequentially using
the single page variant.

With !CONFIG_HIGHMEM, pages are contiguous so use the range clearing
primitive clear_user_pages().

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>

Note: fixed
---
include/linux/highmem.h | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 105cc4c00cc3..c5f8b1556fd7 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -199,6 +199,11 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
+/**
+ * clear_user_highpage() - clear a page to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_local_page(page);
@@ -207,6 +212,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
}
#endif

+/**
+ * clear_user_highpages() - clear a page range to be mapped to user space
+ * @page: start page
+ * @vaddr: start address of the user mapping
+ * @npages: number of pages
+ *
+ * Assumes that all the pages in the region (@page, +@npages) are valid
+ * so this does no exception handling.
+ */
+static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
+ unsigned int npages)
+{
+ if (!IS_ENABLED(CONFIG_HIGHMEM)) {
+ clear_user_pages(page_address(page), vaddr, page, npages);
+ return;
+ }
+
+ do {
+ clear_user_highpage(page, vaddr);
+ vaddr += PAGE_SIZE;
+ page++;
+ } while (--npages);
+}
+
#ifndef vma_alloc_zeroed_movable_folio
/**
* vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
--
2.43.5
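
For context (not part of the patch), a caller zeroing a whole folio before
mapping it to user space might use the new helper roughly like this;
zero_folio_for_user() is a hypothetical name, built on the existing
folio_page() and folio_nr_pages() accessors:

static void zero_folio_for_user(struct folio *folio, unsigned long vaddr)
{
	/*
	 * Hypothetical sketch: with !CONFIG_HIGHMEM this becomes a single
	 * clear_user_pages() call over the contiguous page range; with
	 * CONFIG_HIGHMEM it falls back to per-page clear_user_highpage().
	 */
	clear_user_highpages(folio_page(folio, 0), vaddr,
			     folio_nr_pages(folio));
}
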
On 27.10.25 21:21, Ankur Arora wrote:
> Define clear_user_highpages() which clears pages sequentially using
> the single page variant.
>
> With !CONFIG_HIGHMEM, pages are contiguous so use the range clearing
> primitive clear_user_pages().
>
> Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
>
> Note: fixed

That should be dropped.

> ---
> include/linux/highmem.h | 29 +++++++++++++++++++++++++++++
> 1 file changed, 29 insertions(+)
>
> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
> index 105cc4c00cc3..c5f8b1556fd7 100644
> --- a/include/linux/highmem.h
> +++ b/include/linux/highmem.h
> @@ -199,6 +199,11 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
>
> /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
> #ifndef clear_user_highpage
> +/**
> + * clear_user_highpage() - clear a page to be mapped to user space
> + * @page: start page
> + * @vaddr: start address of the user mapping
> + */
> static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
> {
> void *addr = kmap_local_page(page);
> @@ -207,6 +212,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
> }
> #endif
>
> +/**
> + * clear_user_highpages() - clear a page range to be mapped to user space
> + * @page: start page
> + * @vaddr: start address of the user mapping
> + * @npages: number of pages
> + *
> + * Assumes that all the pages in the region (@page, +@npages) are valid
> + * so this does no exception handling.
> + */
> +static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
> + unsigned int npages)
> +{
> + if (!IS_ENABLED(CONFIG_HIGHMEM)) {
> + clear_user_pages(page_address(page), vaddr, page, npages);
> + return;
> + }
> +
> + do {
> + clear_user_highpage(page, vaddr);
> + vaddr += PAGE_SIZE;
> + page++;
> + } while (--npages);
> +}
> +
> #ifndef vma_alloc_zeroed_movable_folio
> /**
> * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.

Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
--
Cheers
David
David Hildenbrand (Red Hat) <david@kernel.org> writes:
> On 27.10.25 21:21, Ankur Arora wrote:
>> Define clear_user_highpages() which clears pages sequentially using
>> the single page variant.
>> With !CONFIG_HIGHMEM, pages are contiguous so use the range clearing
>> primitive clear_user_pages().
>> Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
>> Note: fixed
>
> That should be dropped.
>
>> ---
>> include/linux/highmem.h | 29 +++++++++++++++++++++++++++++
>> 1 file changed, 29 insertions(+)
>> diff --git a/include/linux/highmem.h b/include/linux/highmem.h
>> index 105cc4c00cc3..c5f8b1556fd7 100644
>> --- a/include/linux/highmem.h
>> +++ b/include/linux/highmem.h
>> @@ -199,6 +199,11 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
>> /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
>> #ifndef clear_user_highpage
>> +/**
>> + * clear_user_highpage() - clear a page to be mapped to user space
>> + * @page: start page
>> + * @vaddr: start address of the user mapping
>> + */
>> static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
>> {
>> void *addr = kmap_local_page(page);
>> @@ -207,6 +212,30 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
>> }
>> #endif
>> +/**
>> + * clear_user_highpages() - clear a page range to be mapped to user space
>> + * @page: start page
>> + * @vaddr: start address of the user mapping
>> + * @npages: number of pages
>> + *
>> + * Assumes that all the pages in the region (@page, +@npages) are valid
>> + * so this does no exception handling.
>> + */
>> +static inline void clear_user_highpages(struct page *page, unsigned long vaddr,
>> + unsigned int npages)
>> +{
>> + if (!IS_ENABLED(CONFIG_HIGHMEM)) {
>> + clear_user_pages(page_address(page), vaddr, page, npages);
>> + return;
>> + }
>> +
>> + do {
>> + clear_user_highpage(page, vaddr);
>> + vaddr += PAGE_SIZE;
>> + page++;
>> + } while (--npages);
>> +}
>> +
>> #ifndef vma_alloc_zeroed_movable_folio
>> /**
>> * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
>
> Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>

Thanks (for this and the others).
--
ankur