From: Shivam Kalra <shivamkalra98@zohomail.in>
Extract the page-freeing loop and NR_VMALLOC stat accounting from
vfree() into a reusable vmalloc_free_pages() helper. The helper operates
on a range [start, end) of pages from a vm_struct, making it suitable
for both full free (vfree) and partial free (upcoming vrealloc shrink).
No functional change.
Signed-off-by: Shivam Kalra <shivamkalra98@zohomail.in>
---
mm/vmalloc.c | 46 ++++++++++++++++++++++++++++++++--------------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c607307c657a..42ae68450a90 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3416,6 +3416,36 @@ void vfree_atomic(const void *addr)
schedule_work(&p->wq);
}
+/*
+ * vmalloc_free_pages - free a range of pages from a vmalloc allocation
+ * @vm: the vm_struct containing the pages
+ * @start: first page index to free (inclusive)
+ * @end: first page index past the range to free (exclusive upper bound)
+ *
+ * Free pages in the range [start, end), updating NR_VMALLOC stat accounting.
+ * Caller is responsible for unmapping (vunmap_range) and KASAN
+ * poisoning before calling this.
+ */
+static void vmalloc_free_pages(struct vm_struct *vm, unsigned int start,
+ unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++) {
+ struct page *page = vm->pages[i];
+
+ BUG_ON(!page);
+ /*
+ * High-order allocs for huge vmallocs are split, so
+ * can be freed as an array of order-0 allocations
+ */
+ if (!(vm->flags & VM_MAP_PUT_PAGES))
+ mod_lruvec_page_state(page, NR_VMALLOC, -1);
+ __free_page(page);
+ cond_resched();
+ }
+}
+
/**
* vfree - Release memory allocated by vmalloc()
* @addr: Memory base address
@@ -3436,7 +3466,6 @@ void vfree_atomic(const void *addr)
void vfree(const void *addr)
{
struct vm_struct *vm;
- int i;
if (unlikely(in_interrupt())) {
vfree_atomic(addr);
@@ -3459,19 +3488,8 @@ void vfree(const void *addr)
if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
vm_reset_perms(vm);
- for (i = 0; i < vm->nr_pages; i++) {
- struct page *page = vm->pages[i];
-
- BUG_ON(!page);
- /*
- * High-order allocs for huge vmallocs are split, so
- * can be freed as an array of order-0 allocations
- */
- if (!(vm->flags & VM_MAP_PUT_PAGES))
- mod_lruvec_page_state(page, NR_VMALLOC, -1);
- __free_page(page);
- cond_resched();
- }
+ if (vm->nr_pages)
+ vmalloc_free_pages(vm, 0, vm->nr_pages);
kvfree(vm->pages);
kfree(vm);
}
--
2.43.0
On Mon, Mar 09, 2026 at 05:25:45PM +0530, Shivam Kalra via B4 Relay wrote:
> From: Shivam Kalra <shivamkalra98@zohomail.in>
>
> Extract the page-freeing loop and NR_VMALLOC stat accounting from
> vfree() into a reusable vmalloc_free_pages() helper. The helper operates
> on a range [start, end) of pages from a vm_struct, making it suitable
> for both full free (vfree) and partial free (upcoming vrealloc shrink).
>
> No functional change.
>
> Signed-off-by: Shivam Kalra <shivamkalra98@zohomail.in>
> ---
> mm/vmalloc.c | 46 ++++++++++++++++++++++++++++++++--------------
> 1 file changed, 32 insertions(+), 14 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index c607307c657a..42ae68450a90 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3416,6 +3416,36 @@ void vfree_atomic(const void *addr)
> schedule_work(&p->wq);
> }
>
> +/*
> + * vmalloc_free_pages - free a range of pages from a vmalloc allocation
> + * @vm: the vm_struct containing the pages
> + * @start: first page index to free (inclusive)
> + * @end: last page index to free (exclusive)
> + *
> + * Free pages [start, end) updating NR_VMALLOC stat accounting.
> + * Caller is responsible for unmapping (vunmap_range) and KASAN
> + * poisoning before calling this.
> + */
> +static void vmalloc_free_pages(struct vm_struct *vm, unsigned int start,
> + unsigned int end)
> +{
> + unsigned int i;
> +
> + for (i = start; i < end; i++) {
> + struct page *page = vm->pages[i];
> +
> + BUG_ON(!page);
> + /*
> + * High-order allocs for huge vmallocs are split, so
> + * can be freed as an array of order-0 allocations
> + */
> + if (!(vm->flags & VM_MAP_PUT_PAGES))
> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
> + __free_page(page);
> + cond_resched();
> + }
> +}
> +
> /**
> * vfree - Release memory allocated by vmalloc()
> * @addr: Memory base address
> @@ -3436,7 +3466,6 @@ void vfree_atomic(const void *addr)
> void vfree(const void *addr)
> {
> struct vm_struct *vm;
> - int i;
>
> if (unlikely(in_interrupt())) {
> vfree_atomic(addr);
> @@ -3459,19 +3488,8 @@ void vfree(const void *addr)
>
> if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
> vm_reset_perms(vm);
> - for (i = 0; i < vm->nr_pages; i++) {
> - struct page *page = vm->pages[i];
> -
> - BUG_ON(!page);
> - /*
> - * High-order allocs for huge vmallocs are split, so
> - * can be freed as an array of order-0 allocations
> - */
> - if (!(vm->flags & VM_MAP_PUT_PAGES))
> - mod_lruvec_page_state(page, NR_VMALLOC, -1);
> - __free_page(page);
> - cond_resched();
> - }
> + if (vm->nr_pages)
> + vmalloc_free_pages(vm, 0, vm->nr_pages);
> kvfree(vm->pages);
> kfree(vm);
> }
>
> --
> 2.43.0
>
>
I posted my comment for the v2. Same here:
Probably we should move the "if (vm->nr_pages)" condition inside
vmalloc_free_pages().
I also think the function should be renamed to something like
vm_area_free_pages() so that it aligns with the vm_area_alloc_pages() helper.
--
Uladzislau Rezki
On 11/03/26 23:04, Uladzislau Rezki wrote:
> On Mon, Mar 09, 2026 at 05:25:45PM +0530, Shivam Kalra via B4 Relay wrote:
>> From: Shivam Kalra <shivamkalra98@zohomail.in>
>>
>> Extract the page-freeing loop and NR_VMALLOC stat accounting from
>> vfree() into a reusable vmalloc_free_pages() helper. The helper operates
>> on a range [start, end) of pages from a vm_struct, making it suitable
>> for both full free (vfree) and partial free (upcoming vrealloc shrink).
>>
>> No functional change.
>>
>> Signed-off-by: Shivam Kalra <shivamkalra98@zohomail.in>
>> ---
>> mm/vmalloc.c | 46 ++++++++++++++++++++++++++++++++--------------
>> 1 file changed, 32 insertions(+), 14 deletions(-)
>>
>> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
>> index c607307c657a..42ae68450a90 100644
>> --- a/mm/vmalloc.c
>> +++ b/mm/vmalloc.c
>> @@ -3416,6 +3416,36 @@ void vfree_atomic(const void *addr)
>> schedule_work(&p->wq);
>> }
>>
>> +/*
>> + * vmalloc_free_pages - free a range of pages from a vmalloc allocation
>> + * @vm: the vm_struct containing the pages
>> + * @start: first page index to free (inclusive)
>> + * @end: last page index to free (exclusive)
>> + *
>> + * Free pages [start, end) updating NR_VMALLOC stat accounting.
>> + * Caller is responsible for unmapping (vunmap_range) and KASAN
>> + * poisoning before calling this.
>> + */
>> +static void vmalloc_free_pages(struct vm_struct *vm, unsigned int start,
>> + unsigned int end)
>> +{
>> + unsigned int i;
>> +
>> + for (i = start; i < end; i++) {
>> + struct page *page = vm->pages[i];
>> +
>> + BUG_ON(!page);
>> + /*
>> + * High-order allocs for huge vmallocs are split, so
>> + * can be freed as an array of order-0 allocations
>> + */
>> + if (!(vm->flags & VM_MAP_PUT_PAGES))
>> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
>> + __free_page(page);
>> + cond_resched();
>> + }
>> +}
>> +
>> /**
>> * vfree - Release memory allocated by vmalloc()
>> * @addr: Memory base address
>> @@ -3436,7 +3466,6 @@ void vfree_atomic(const void *addr)
>> void vfree(const void *addr)
>> {
>> struct vm_struct *vm;
>> - int i;
>>
>> if (unlikely(in_interrupt())) {
>> vfree_atomic(addr);
>> @@ -3459,19 +3488,8 @@ void vfree(const void *addr)
>>
>> if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
>> vm_reset_perms(vm);
>> - for (i = 0; i < vm->nr_pages; i++) {
>> - struct page *page = vm->pages[i];
>> -
>> - BUG_ON(!page);
>> - /*
>> - * High-order allocs for huge vmallocs are split, so
>> - * can be freed as an array of order-0 allocations
>> - */
>> - if (!(vm->flags & VM_MAP_PUT_PAGES))
>> - mod_lruvec_page_state(page, NR_VMALLOC, -1);
>> - __free_page(page);
>> - cond_resched();
>> - }
>> + if (vm->nr_pages)
>> + vmalloc_free_pages(vm, 0, vm->nr_pages);
>> kvfree(vm->pages);
>> kfree(vm);
>> }
>>
>> --
>> 2.43.0
>>
>>
> I posted my comment for the v2. Same here:
>
> probably we should move "if (vm->nr_pages)" condition inside the
> vmalloc_free_pages().
>
> I think, the function name should also be renamed to something like
> vm_area_free_pages() so we align with vm_area_alloc_pages() helper.
>
> --
> Uladzislau Rezki
We might not even need this check: when nr_pages is zero, the loop
inside the function is a no-op, since end would also be zero.
© 2016 - 2026 Red Hat, Inc.