From: Ryan Roberts <ryan.roberts@arm.com>
Whenever vmalloc allocates high-order pages (e.g. for a huge mapping),
it must immediately split_page() them to order-0 so that they remain
compatible with users that want to access the underlying struct pages.
Commit a06157804399 ("mm/vmalloc: request large order pages from buddy
allocator") recently made it much more likely for vmalloc to allocate
high-order pages which are subsequently split to order-0.

Unfortunately this had the side effect of causing performance
regressions for tight vmalloc/vfree loops (e.g. the test_vmalloc.ko
benchmarks); see the Closes: tag. This happens because the high-order
pages must be obtained from the buddy allocator, but since they are
split to order-0, they are freed back to the order-0 pcp lists.
Previously the allocations were order-0 pages, so they were recycled
from the pcp.
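
Schematically, the asymmetry looks like this (an illustrative sketch,
not the literal vmalloc code paths):

	page = alloc_pages(gfp, 3);	/* one order-3 block from the buddy */
	split_page(page, 3);		/* becomes 8 independent order-0 pages */
	...
	for (i = 0; i < 8; i++)
		__free_page(page + i);	/* each lands on the order-0 pcp */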
It would be preferable if, when vmalloc allocates an (e.g.) order-3
page, it also freed that order-3 page back to the order-3 pcp; then the
regression would be removed.

So let's do exactly that; use the new __free_contig_range() API to
batch-free contiguous ranges of pfns. This not only removes the
regression, but significantly improves the performance of vfree beyond
the baseline.

A selection of test_vmalloc benchmark results, running on an arm64
server-class system. mm-new is the baseline. Commit a06157804399
("mm/vmalloc: request large order pages from buddy allocator") was added
in v6.19-rc1, where we see the regressions. With this change, performance
is much better. The mm-new column is absolute time in usec; the "this
series" column is the % change relative to mm-new (>0 is faster, <0 is
slower, (R)/(I) = statistically significant Regression/Improvement):
+-----------------+----------------------------------------------------------+-------------------+--------------------+
| Benchmark | Result Class | mm-new | this series |
+=================+==========================================================+===================+====================+
| micromm/vmalloc | fix_align_alloc_test: p:1, h:0, l:500000 (usec) | 1331843.33 | (I) 67.17% |
| | fix_size_alloc_test: p:1, h:0, l:500000 (usec) | 415907.33 | -5.14% |
| | fix_size_alloc_test: p:4, h:0, l:500000 (usec) | 755448.00 | (I) 53.55% |
| | fix_size_alloc_test: p:16, h:0, l:500000 (usec) | 1591331.33 | (I) 57.26% |
| | fix_size_alloc_test: p:16, h:1, l:500000 (usec) | 1594345.67 | (I) 68.46% |
| | fix_size_alloc_test: p:64, h:0, l:100000 (usec) | 1071826.00 | (I) 79.27% |
| | fix_size_alloc_test: p:64, h:1, l:100000 (usec) | 1018385.00 | (I) 84.17% |
| | fix_size_alloc_test: p:256, h:0, l:100000 (usec) | 3970899.67 | (I) 77.01% |
| | fix_size_alloc_test: p:256, h:1, l:100000 (usec) | 3821788.67 | (I) 89.44% |
| | fix_size_alloc_test: p:512, h:0, l:100000 (usec) | 7795968.00 | (I) 82.67% |
| | fix_size_alloc_test: p:512, h:1, l:100000 (usec) | 6530169.67 | (I) 118.09% |
| | full_fit_alloc_test: p:1, h:0, l:500000 (usec) | 626808.33 | -0.98% |
| | kvfree_rcu_1_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 532145.67 | -1.68% |
| | kvfree_rcu_2_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 537032.67 | -0.96% |
| | long_busy_list_alloc_test: p:1, h:0, l:500000 (usec) | 8805069.00 | (I) 74.58% |
| | pcpu_alloc_test: p:1, h:0, l:500000 (usec) | 500824.67 | 4.35% |
| | random_size_align_alloc_test: p:1, h:0, l:500000 (usec) | 1637554.67 | (I) 76.99% |
| | random_size_alloc_test: p:1, h:0, l:500000 (usec) | 4556288.67 | (I) 72.23% |
| | vm_map_ram_test: p:1, h:0, l:500000 (usec) | 107371.00 | -0.70% |
+-----------------+----------------------------------------------------------+-------------------+--------------------+
Fixes: a06157804399 ("mm/vmalloc: request large order pages from buddy allocator")
Closes: https://lore.kernel.org/all/66919a28-bc81-49c9-b68f-dd7c73395a0d@arm.com/
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Muhammad Usama Anjum <usama.anjum@arm.com>
Signed-off-by: Muhammad Usama Anjum <usama.anjum@arm.com>
---
Changes since v1:
- Rebase on mm-new
- Rerun benchmarks
---
mm/vmalloc.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c607307c657a6..8b935395fb068 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3459,18 +3459,34 @@ void vfree(const void *addr)
 
 	if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
 		vm_reset_perms(vm);
-	for (i = 0; i < vm->nr_pages; i++) {
-		struct page *page = vm->pages[i];
+
+	if (vm->nr_pages) {
+		bool account = !(vm->flags & VM_MAP_PUT_PAGES);
+		unsigned long start_pfn, pfn;
+		struct page *page = vm->pages[0];
+		int nr = 1;
 
 		BUG_ON(!page);
-		/*
-		 * High-order allocs for huge vmallocs are split, so
-		 * can be freed as an array of order-0 allocations
-		 */
-		if (!(vm->flags & VM_MAP_PUT_PAGES))
+		start_pfn = page_to_pfn(page);
+		if (account)
 			mod_lruvec_page_state(page, NR_VMALLOC, -1);
-		__free_page(page);
-		cond_resched();
+
+		for (i = 1; i < vm->nr_pages; i++) {
+			page = vm->pages[i];
+			BUG_ON(!page);
+			if (account)
+				mod_lruvec_page_state(page, NR_VMALLOC, -1);
+			pfn = page_to_pfn(page);
+			if (start_pfn + nr == pfn) {
+				nr++;
+				continue;
+			}
+			__free_contig_range(start_pfn, nr);
+			start_pfn = pfn;
+			nr = 1;
+			cond_resched();
+		}
+		__free_contig_range(start_pfn, nr);
 	}
 	kvfree(vm->pages);
 	kfree(vm);
--
2.47.3
On 3/16/26 12:31, Muhammad Usama Anjum wrote:
> From: Ryan Roberts <ryan.roberts@arm.com>
>
> Whenever vmalloc allocates high order pages (e.g. for a huge mapping) it
> must immediately split_page() to order-0 so that it remains compatible
> with users that want to access the underlying struct page.
> Commit a06157804399 ("mm/vmalloc: request large order pages from buddy
> allocator") recently made it much more likely for vmalloc to allocate
> high order pages which are subsequently split to order-0.
>
> Unfortunately this had the side effect of causing performance
> regressions for tight vmalloc/vfree loops (e.g. test_vmalloc.ko
> benchmarks). See Closes: tag. This happens because the high order pages
> must be gotten from the buddy but then because they are split to
> order-0, when they are freed they are freed to the order-0 pcp.
> Previously allocation was for order-0 pages so they were recycled from
> the pcp.
>
> It would be preferable if when vmalloc allocates an (e.g.) order-3 page
> that it also frees that order-3 page to the order-3 pcp, then the
> regression could be removed.
>
> So let's do exactly that; use the new __free_contig_range() API to
> batch-free contiguous ranges of pfns. This not only removes the
> regression, but significantly improves performance of vfree beyond the
> baseline.
>
> A selection of test_vmalloc benchmarks running on arm64 server class
> system. mm-new is the baseline. Commit a06157804399 ("mm/vmalloc: request
> large order pages from buddy allocator") was added in v6.19-rc1 where we
> see regressions. Then with this change performance is much better. (>0
> is faster, <0 is slower, (R)/(I) = statistically significant
> Regression/Improvement):
>
> +-----------------+----------------------------------------------------------+-------------------+--------------------+
> | Benchmark | Result Class | mm-new | this series |
> +=================+==========================================================+===================+====================+
> | micromm/vmalloc | fix_align_alloc_test: p:1, h:0, l:500000 (usec) | 1331843.33 | (I) 67.17% |
> | | fix_size_alloc_test: p:1, h:0, l:500000 (usec) | 415907.33 | -5.14% |
> | | fix_size_alloc_test: p:4, h:0, l:500000 (usec) | 755448.00 | (I) 53.55% |
> | | fix_size_alloc_test: p:16, h:0, l:500000 (usec) | 1591331.33 | (I) 57.26% |
> | | fix_size_alloc_test: p:16, h:1, l:500000 (usec) | 1594345.67 | (I) 68.46% |
> | | fix_size_alloc_test: p:64, h:0, l:100000 (usec) | 1071826.00 | (I) 79.27% |
> | | fix_size_alloc_test: p:64, h:1, l:100000 (usec) | 1018385.00 | (I) 84.17% |
> | | fix_size_alloc_test: p:256, h:0, l:100000 (usec) | 3970899.67 | (I) 77.01% |
> | | fix_size_alloc_test: p:256, h:1, l:100000 (usec) | 3821788.67 | (I) 89.44% |
> | | fix_size_alloc_test: p:512, h:0, l:100000 (usec) | 7795968.00 | (I) 82.67% |
> | | fix_size_alloc_test: p:512, h:1, l:100000 (usec) | 6530169.67 | (I) 118.09% |
> | | full_fit_alloc_test: p:1, h:0, l:500000 (usec) | 626808.33 | -0.98% |
> | | kvfree_rcu_1_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 532145.67 | -1.68% |
> | | kvfree_rcu_2_arg_vmalloc_test: p:1, h:0, l:500000 (usec) | 537032.67 | -0.96% |
> | | long_busy_list_alloc_test: p:1, h:0, l:500000 (usec) | 8805069.00 | (I) 74.58% |
> | | pcpu_alloc_test: p:1, h:0, l:500000 (usec) | 500824.67 | 4.35% |
> | | random_size_align_alloc_test: p:1, h:0, l:500000 (usec) | 1637554.67 | (I) 76.99% |
> | | random_size_alloc_test: p:1, h:0, l:500000 (usec) | 4556288.67 | (I) 72.23% |
> | | vm_map_ram_test: p:1, h:0, l:500000 (usec) | 107371.00 | -0.70% |
> +-----------------+----------------------------------------------------------+-------------------+--------------------+
>
> Fixes: a06157804399 ("mm/vmalloc: request large order pages from buddy allocator")
> Closes: https://lore.kernel.org/all/66919a28-bc81-49c9-b68f-dd7c73395a0d@arm.com/
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> Co-developed-by: Muhammad Usama Anjum <usama.anjum@arm.com>
> Signed-off-by: Muhammad Usama Anjum <usama.anjum@arm.com>
> ---
> Changes since v1:
> - Rebase on mm-new
> - Rerun benchmarks
> ---
> mm/vmalloc.c | 34 +++++++++++++++++++++++++---------
> 1 file changed, 25 insertions(+), 9 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index c607307c657a6..8b935395fb068 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -3459,18 +3459,34 @@ void vfree(const void *addr)
>
> if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
> vm_reset_perms(vm);
> - for (i = 0; i < vm->nr_pages; i++) {
> - struct page *page = vm->pages[i];
> +
> + if (vm->nr_pages) {
> + bool account = !(vm->flags & VM_MAP_PUT_PAGES);
> + unsigned long start_pfn, pfn;
> + struct page *page = vm->pages[0];
> + int nr = 1;
>
> BUG_ON(!page);
> - /*
> - * High-order allocs for huge vmallocs are split, so
> - * can be freed as an array of order-0 allocations
> - */
> - if (!(vm->flags & VM_MAP_PUT_PAGES))
> + start_pfn = page_to_pfn(page);
> + if (account)
> mod_lruvec_page_state(page, NR_VMALLOC, -1);
> - __free_page(page);
> - cond_resched();
> +
> + for (i = 1; i < vm->nr_pages; i++) {
> + page = vm->pages[i];
> + BUG_ON(!page);
We shouldn't be adding BUG_ON()'s. Rather demote also the pre-existing one
to VM_WARN_ON_ONCE() and skip gracefully.
> + if (account)
> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
I think we should be able to batch this too to use "nr"?
> + pfn = page_to_pfn(page);
> + if (start_pfn + nr == pfn) {
> + nr++;
> + continue;
> + }
> + __free_contig_range(start_pfn, nr);
> + start_pfn = pfn;
> + nr = 1;
> + cond_resched();
> + }
> + __free_contig_range(start_pfn, nr);
> }
> kvfree(vm->pages);
> kfree(vm);
On 3/16/26 16:49, Vlastimil Babka wrote:
> On 3/16/26 12:31, Muhammad Usama Anjum wrote:
[...]
>> + if (account)
>> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
>
> I think we should be able to batch this too to use "nr"?
Are we sure that pages cannot cross nodes etc? It could happen that we
have a contig range that spans zones/nodes/etc ...

Anyhow, should we try to decouple both things, providing a
core-mm function to do the page freeing?

We do have something similar, optimized unpinning of large folios,
in unpin_user_pages_dirty_lock(). This here is a bit different.

So what I am thinking about for this code here to do:

	if (!(vm->flags & VM_MAP_PUT_PAGES)) {
		for (i = 0; i < vm->nr_pages; i++)
			mod_lruvec_page_state(vm->pages[i], NR_VMALLOC, -1);
	}
	free_pages_bulk(vm->pages, vm->nr_pages);

We could optimize the first loop to do batching where possible as well.

free_pages_bulk() would match alloc_pages_bulk():

	void free_pages_bulk(struct page **page_array, unsigned long nr_pages);

Internally we'd do the contig handling.
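A rough sketch of those internals (untested, and ignoring the zone/node
question above for now):

	void free_pages_bulk(struct page **page_array, unsigned long nr_pages)
	{
		unsigned long i = 0;

		while (i < nr_pages) {
			unsigned long start_pfn = page_to_pfn(page_array[i]);
			unsigned long nr = 1;

			/* Grow the run while the pfns stay contiguous. */
			while (i + nr < nr_pages &&
			       page_to_pfn(page_array[i + nr]) == start_pfn + nr)
				nr++;

			__free_contig_range(start_pfn, nr);
			i += nr;
			cond_resched();
		}
	}
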
Was that already discussed?
--
Cheers,
David
On 3/20/26 09:39, David Hildenbrand (Arm) wrote:
> On 3/16/26 16:49, Vlastimil Babka wrote:
[...]
>>> + if (account)
>>> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
>>
>> I think we should be able to batch this too to use "nr"?
>
> Are we sure that pages cannot cross nodes etc? It could happen that we
> have a contig range that spans zones/nodes/etc ...
Hmm, a single order-3 allocation can't, but we could be unlucky and get
the last order-3 from zone X and the first order-3 from the adjacent
zone Y. In that case the loop would need to also check for the same
zone/node.
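E.g. something like (a sketch; "start_page" is a hypothetical variable
tracking the first page of the current run; a zone belongs to a single
node, so checking the zone suffices):

	pfn = page_to_pfn(page);
	if (start_pfn + nr == pfn && page_zone(page) == page_zone(start_page)) {
		nr++;
		continue;
	}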
[...]
> free_pages_bulk() would match alloc_pages_bulk()
>
> void free_pages_bulk(struct page **page_array, unsigned long nr_pages)
>
> Internally we'd do the contig handling.
>
> Was that already discussed?
AFAIU some of Zi's replies hinted at this direction. It would make sense, yeah.
On 20/03/2026 2:33 pm, Vlastimil Babka (SUSE) wrote:
> On 3/20/26 09:39, David Hildenbrand (Arm) wrote:
>> On 3/16/26 16:49, Vlastimil Babka wrote:
[...]
>> Was that already discussed?
>
> AFAIU some of Zi's replies hinted at this direction. It would make sense, yeah.
I'm updating and will send the next version.
Thanks,
Usama
On 16/03/2026 3:49 pm, Vlastimil Babka wrote:
> On 3/16/26 12:31, Muhammad Usama Anjum wrote:
[...]
>> + for (i = 1; i < vm->nr_pages; i++) {
>> + page = vm->pages[i];
>> + BUG_ON(!page);
>
> We shouldn't be adding BUG_ON()'s. Rather demote also the pre-existing one
> to VM_WARN_ON_ONCE() and skip gracefully.
Sure, I'll replace it with WARN_ON_ONCE() instead, which also returns
the condition result, for easier skip logic.
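I.e. something like (a sketch, not the final patch):

	page = vm->pages[i];
	if (WARN_ON_ONCE(!page))
		continue;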
>
>> + if (account)
>> + mod_lruvec_page_state(page, NR_VMALLOC, -1);
>
> I think we should be able to batch this too to use "nr"?
Yes, I'll update in the next version.
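E.g. one call per contiguous run (a sketch; this assumes all pages of a
run share the same node and memcg, which ties into the zone/node point
raised elsewhere in the thread):

	if (account)
		mod_lruvec_page_state(pfn_to_page(start_pfn), NR_VMALLOC, -nr);
	__free_contig_range(start_pfn, nr);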