From: Hui Zhu <zhuhui@kylinos.cn>
This commit adds a new function, vrealloc_align().
vrealloc_align() supports allocation of aligned vmap pages via
__vmalloc_node_noprof().
vrealloc_align() also checks the old address: if it does not meet
the requested alignment, the old vmap pages are released and new
vmap pages that satisfy the alignment requirement are allocated
instead.
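
For example, a caller that must keep a buffer 64KiB-aligned across
reallocations could use it like this (hypothetical usage sketch, not
part of this patch; grow_aligned_buf is an illustrative name):

	#include <linux/gfp.h>
	#include <linux/sizes.h>
	#include <linux/vmalloc.h>

	static void *grow_aligned_buf(void *buf, size_t new_size)
	{
		/*
		 * Contents are preserved up to min(old, new) size. If
		 * buf is not 64KiB-aligned, a new aligned area is
		 * allocated, the data is copied, and the old area is
		 * freed.
		 */
		return vrealloc_align(buf, new_size, SZ_64K, GFP_KERNEL);
	}
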
Co-developed-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Geliang Tang <geliang@kernel.org>
Signed-off-by: Hui Zhu <zhuhui@kylinos.cn>
---
 include/linux/vmalloc.h |  5 +++
 mm/vmalloc.c            | 80 ++++++++++++++++++++++++++---------------
 2 files changed, 57 insertions(+), 28 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index fdc9aeb74a44..0ce0c1ea2427 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -201,6 +201,11 @@ void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
__realloc_size(2);
#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+void * __must_check vrealloc_align_noprof(const void *p, size_t size,
+ size_t align, gfp_t flags)
+ __realloc_size(2);
+#define vrealloc_align(...) alloc_hooks(vrealloc_align_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ab986dd09b6a..41cb3603b3cc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4081,9 +4081,11 @@ void *vzalloc_node_noprof(unsigned long size, int node)
EXPORT_SYMBOL(vzalloc_node_noprof);
/**
- * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * vrealloc_align - reallocate virtually contiguous memory;
+ * contents remain unchanged
* @p: object to reallocate memory for
* @size: the size to reallocate
+ * @align: requested alignment
* @flags: the flags for the page level allocator
*
* If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
@@ -4103,7 +4105,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof);
* Return: pointer to the allocated memory; %NULL if @size is zero or in case of
* failure
*/
-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_align_noprof(const void *p, size_t size, size_t align,
+ gfp_t flags)
{
struct vm_struct *vm = NULL;
size_t alloced_size = 0;
@@ -4116,49 +4119,65 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
}
if (p) {
+ if (!is_power_of_2(align)) {
+ WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
+ align);
+ return NULL;
+ }
+
vm = find_vm_area(p);
if (unlikely(!vm)) {
- WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
+ WARN(1, "Trying to vrealloc_align() nonexistent vm area (%p)\n", p);
return NULL;
}
alloced_size = get_vm_area_size(vm);
old_size = vm->requested_size;
if (WARN(alloced_size < old_size,
- "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+ "vrealloc_align() has mismatched area vs requested sizes (%p)\n", p))
return NULL;
}
- /*
- * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
- * would be a good heuristic for when to shrink the vm_area?
- */
- if (size <= old_size) {
- /* Zero out "freed" memory, potentially for future realloc. */
- if (want_init_on_free() || want_init_on_alloc(flags))
- memset((void *)p + size, 0, old_size - size);
- vm->requested_size = size;
- kasan_poison_vmalloc(p + size, old_size - size);
- return (void *)p;
- }
+ if (IS_ALIGNED((unsigned long)p, align)) {
+ /*
+ * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
+ * would be a good heuristic for when to shrink the vm_area?
+ */
+ if (size <= old_size) {
+ /* Zero out "freed" memory, potentially for future realloc. */
+ if (want_init_on_free() || want_init_on_alloc(flags))
+ memset((void *)p + size, 0, old_size - size);
+ vm->requested_size = size;
+ kasan_poison_vmalloc(p + size, old_size - size);
+ return (void *)p;
+ }
- /*
- * We already have the bytes available in the allocation; use them.
- */
- if (size <= alloced_size) {
- kasan_unpoison_vmalloc(p + old_size, size - old_size,
- KASAN_VMALLOC_PROT_NORMAL);
/*
- * No need to zero memory here, as unused memory will have
- * already been zeroed at initial allocation time or during
- * realloc shrink time.
+ * We already have the bytes available in the allocation; use them.
+ */
+ if (size <= alloced_size) {
+ kasan_unpoison_vmalloc(p + old_size, size - old_size,
+ KASAN_VMALLOC_PROT_NORMAL);
+ /*
+ * No need to zero memory here, as unused memory will have
+ * already been zeroed at initial allocation time or during
+ * realloc shrink time.
+ */
+ vm->requested_size = size;
+ return (void *)p;
+ }
+ } else {
+ /*
+ * p is not aligned with align.
+ * Allocate a new address to handle it.
*/
- vm->requested_size = size;
- return (void *)p;
+ if (size < old_size)
+ old_size = size;
}
/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
- n = __vmalloc_noprof(size, flags);
+ n = __vmalloc_node_noprof(size, align, flags, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (!n)
return NULL;
@@ -4170,6 +4189,11 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
return n;
}
+void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+{
+ return vrealloc_align_noprof(p, size, 1, flags);
+}
+
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
--
2.43.0

On Tue, Jul 15, 2025 at 05:59:46PM +0800, Hui Zhu wrote:
> From: Hui Zhu <zhuhui@kylinos.cn>
>
> This commit adds a new function, vrealloc_align().
> ...

This is similar to what Vitaly is doing. There is already a v14, but
as an example see it here:

https://lkml.org/lkml/2025/7/9/1583

--
Uladzislau Rezki
Hi Hui,

kernel test robot noticed the following build warnings:

[auto build test WARNING on rust/rust-next]
[also build test WARNING on akpm-mm/mm-everything rust/alloc-next linus/master v6.16-rc6 next-20250715]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Hui-Zhu/vmalloc-Add-vrealloc_align-to-support-allocation-of-aligned-vmap-pages/20250715-180136
base:   https://github.com/Rust-for-Linux/linux rust-next
patch link:    https://lore.kernel.org/r/81647cce3b8e7139af47f20dbeba184b7a89b0cc.1752573305.git.zhuhui%40kylinos.cn
patch subject: [PATCH 1/3] vmalloc: Add vrealloc_align to support allocation of aligned vmap pages
config: i386-buildonly-randconfig-002-20250716 (https://download.01.org/0day-ci/archive/20250716/202507160708.jArplInK-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250716/202507160708.jArplInK-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202507160708.jArplInK-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> mm/vmalloc.c:4124:9: warning: format specifies type 'long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
    4123 |                 WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
         |                                                                              ~~~
         |                                                                              %zu
    4124 |                      align);
         |                      ^~~~~
   include/asm-generic/bug.h:134:29: note: expanded from macro 'WARN'
     134 |                 __WARN_printf(TAINT_WARN, format);              \
         |                                           ^~~~~~
   include/asm-generic/bug.h:106:17: note: expanded from macro '__WARN_printf'
     106 |                 __warn_printk(arg);                             \
         |                 ^~~
   mm/vmalloc.c:1987:20: warning: unused function 'setup_vmalloc_vm' [-Wunused-function]
    1987 | static inline void setup_vmalloc_vm(struct vm_struct *vm,
         |                    ^~~~~~~~~~~~~~~~
   2 warnings generated.


vim +4124 mm/vmalloc.c

  4082	
  4083	/**
  4084	 * vrealloc_align - reallocate virtually contiguous memory;
  4085	 * contents remain unchanged
  4086	 * @p: object to reallocate memory for
  4087	 * @size: the size to reallocate
  4088	 * @align: requested alignment
  4089	 * @flags: the flags for the page level allocator
  4090	 *
  4091	 * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
  4092	 * @p is not a %NULL pointer, the object pointed to is freed.
  4093	 *
  4094	 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
  4095	 * initial memory allocation, every subsequent call to this API for the same
  4096	 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
  4097	 * __GFP_ZERO is not fully honored by this API.
  4098	 *
  4099	 * In any case, the contents of the object pointed to are preserved up to the
  4100	 * lesser of the new and old sizes.
  4101	 *
  4102	 * This function must not be called concurrently with itself or vfree() for the
  4103	 * same memory allocation.
  4104	 *
  4105	 * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
  4106	 * failure
  4107	 */
  4108	void *vrealloc_align_noprof(const void *p, size_t size, size_t align,
  4109			gfp_t flags)
  4110	{
  4111		struct vm_struct *vm = NULL;
  4112		size_t alloced_size = 0;
  4113		size_t old_size = 0;
  4114		void *n;
  4115	
  4116		if (!size) {
  4117			vfree(p);
  4118			return NULL;
  4119		}
  4120	
  4121		if (p) {
  4122			if (!is_power_of_2(align)) {
  4123				WARN(1, "Trying to vrealloc_align() align is not power of 2 (%ld)\n",
> 4124					align);
  4125				return NULL;
  4126			}
  4127	
  4128			vm = find_vm_area(p);
  4129			if (unlikely(!vm)) {
  4130				WARN(1, "Trying to vrealloc_align() nonexistent vm area (%p)\n", p);
  4131				return NULL;
  4132			}
  4133	
  4134			alloced_size = get_vm_area_size(vm);
  4135			old_size = vm->requested_size;
  4136			if (WARN(alloced_size < old_size,
  4137				"vrealloc_align() has mismatched area vs requested sizes (%p)\n", p))
  4138				return NULL;
  4139		}
  4140	
  4141		if (IS_ALIGNED((unsigned long)p, align)) {
  4142			/*
  4143			 * TODO: Shrink the vm_area, i.e. unmap and free unused pages. What
  4144			 * would be a good heuristic for when to shrink the vm_area?
  4145			 */
  4146			if (size <= old_size) {
  4147				/* Zero out "freed" memory, potentially for future realloc. */
  4148				if (want_init_on_free() || want_init_on_alloc(flags))
  4149					memset((void *)p + size, 0, old_size - size);
  4150				vm->requested_size = size;
  4151				kasan_poison_vmalloc(p + size, old_size - size);
  4152				return (void *)p;
  4153			}
  4154	
  4155			/*
  4156			 * We already have the bytes available in the allocation; use them.
  4157			 */
  4158			if (size <= alloced_size) {
  4159				kasan_unpoison_vmalloc(p + old_size, size - old_size,
  4160						KASAN_VMALLOC_PROT_NORMAL);
  4161				/*
  4162				 * No need to zero memory here, as unused memory will have
  4163				 * already been zeroed at initial allocation time or during
  4164				 * realloc shrink time.
  4165				 */
  4166				vm->requested_size = size;
  4167				return (void *)p;
  4168			}
  4169		} else {
  4170			/*
  4171			 * p is not aligned with align.
  4172			 * Allocate a new address to handle it.
  4173			 */
  4174			if (size < old_size)
  4175				old_size = size;
  4176		}
  4177	
  4178		/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
  4179		n = __vmalloc_node_noprof(size, align, flags, NUMA_NO_NODE,
  4180				__builtin_return_address(0));
  4181		if (!n)
  4182			return NULL;
  4183	
  4184		if (p) {
  4185			memcpy(n, p, old_size);
  4186			vfree(p);
  4187		}
  4188	
  4189		return n;
  4190	}
  4191	

--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
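
The compiler's fix-it hint above points at the remedy: a size_t
argument takes the %zu conversion specifier, which is the same width
on every architecture. A minimal corrected form of the check (sketch
only, not a posted patch):

	if (!is_power_of_2(align)) {
		/* %zu is the portable printf specifier for size_t */
		WARN(1, "Trying to vrealloc_align() align is not power of 2 (%zu)\n",
		     align);
		return NULL;
	}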