To enable node-specific hash-tables, add vmalloc_huge_node(), a NUMA-aware variant of vmalloc_huge().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/vmalloc.h | 1 +
mm/vmalloc.c | 11 ++++++++---
2 files changed, 9 insertions(+), 3 deletions(-)
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -152,6 +152,7 @@ extern void *__vmalloc_node_range(unsign
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3416,6 +3416,13 @@ void *vmalloc(unsigned long size)
}
EXPORT_SYMBOL(vmalloc);
+void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+ node, __builtin_return_address(0));
+}
+
/**
* vmalloc_huge - allocate virtually contiguous memory, allow huge pages
* @size: allocation size
@@ -3430,9 +3437,7 @@ EXPORT_SYMBOL(vmalloc);
*/
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
- NUMA_NO_NODE, __builtin_return_address(0));
+ return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL_GPL(vmalloc_huge);
On Fri, Jul 14, 2023 at 03:39:04PM +0200, Peter Zijlstra wrote:
> +void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
> +{
> + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> + node, __builtin_return_address(0));
> +}
> +
> /**
> * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
> * @size: allocation size
> @@ -3430,9 +3437,7 @@ EXPORT_SYMBOL(vmalloc);
> */
> void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
> {
> - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> - gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> - NUMA_NO_NODE, __builtin_return_address(0));
> + return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
> }
Isn't this going to result in the "caller" being always recorded as
vmalloc_huge() instead of the caller of vmalloc_huge()?
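The concern comes from how __builtin_return_address(0) works: it yields the address the *current* function will return to, so burying it inside a shared out-of-line helper records the helper's immediate caller rather than the original call site. A minimal userspace sketch of the effect (hypothetical names, not kernel code; a wrapper statement after the call prevents a sibling-call optimization from hiding the problem):

#include <stdio.h>

__attribute__((noinline))
static void helper(void)
{
	/* The return site is inside wrapper(), so this prints an
	 * address in wrapper(), not in wrapper()'s caller. */
	printf("recorded caller: %p\n", __builtin_return_address(0));
}

void wrapper(void)
{
	helper();
	puts("wrapper done");	/* keep the call out of tail position */
}

int main(void)
{
	wrapper();		/* attribution points at wrapper(), not main() */
	return 0;
}

In the kernel, this address is stored as the vm area's caller and reported through /proc/vmallocinfo, so misattributing every allocation to vmalloc_huge() would make them harder to track down.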
On Fri, Jul 14, 2023 at 03:37:38PM +0100, Matthew Wilcox wrote:
> On Fri, Jul 14, 2023 at 03:39:04PM +0200, Peter Zijlstra wrote:
> > +void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
> > +{
> > + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> > + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> > + node, __builtin_return_address(0));
> > +}
> > +
> > /**
> > * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
> > * @size: allocation size
> > @@ -3430,9 +3437,7 @@ EXPORT_SYMBOL(vmalloc);
> > */
> > void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
> > {
> > - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> > - gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> > - NUMA_NO_NODE, __builtin_return_address(0));
> > + return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
> > }
>
> Isn't this going to result in the "caller" being always recorded as
> vmalloc_huge() instead of the caller of vmalloc_huge()?
Durr, I missed that, but it depends: not if the compiler inlines it.
I'll make a common __always_inline helper to cure this.
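Such a helper might have looked like the sketch below (hypothetical: this version was not what ended up being applied, and the name __vmalloc_huge is made up). Because the helper is forcibly inlined into each exported wrapper, __builtin_return_address(0) resolves to that wrapper's caller on both paths:

static __always_inline void *__vmalloc_huge(unsigned long size,
					    gfp_t gfp_mask, int node)
{
	/* Inlined into each exported function below, so the recorded
	 * caller is that function's caller. */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
				    node, __builtin_return_address(0));
}

void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
{
	return __vmalloc_huge(size, gfp_mask, node);
}

void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_huge(size, gfp_mask, NUMA_NO_NODE);
}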
On Fri, Jul 14, 2023 at 05:09:48PM +0200, Peter Zijlstra wrote:
> On Fri, Jul 14, 2023 at 03:37:38PM +0100, Matthew Wilcox wrote:
> > On Fri, Jul 14, 2023 at 03:39:04PM +0200, Peter Zijlstra wrote:
> > > +void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
> > > +{
> > > + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> > > + gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> > > + node, __builtin_return_address(0));
> > > +}
> > > +
> > > /**
> > > * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
> > > * @size: allocation size
> > > @@ -3430,9 +3437,7 @@ EXPORT_SYMBOL(vmalloc);
> > > */
> > > void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
> > > {
> > > - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
> > > - gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
> > > - NUMA_NO_NODE, __builtin_return_address(0));
> > > + return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);
> > > }
> >
> > Isn't this going to result in the "caller" being always recorded as
> > vmalloc_huge() instead of the caller of vmalloc_huge()?
>
> Durr, I missed that, but it depends: not if the compiler inlines it.
>
> I'll make a common __always_inline helper to cure this.
... or just don't change vmalloc_huge()? Or make the common helper take
the __builtin_return_address as a parameter?
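The second alternative would thread the return address through explicitly, the way __vmalloc_node() in the header hunk above already does. A hypothetical sketch (the static helper name is made up), where each exported entry point captures its own caller:

static void *__vmalloc_huge_node(unsigned long size, gfp_t gfp_mask,
				 int node, const void *caller)
{
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
				    node, caller);
}

void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
{
	/* Captured here, so the exported function's caller is recorded. */
	return __vmalloc_huge_node(size, gfp_mask, node,
				   __builtin_return_address(0));
}

void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
	return __vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE,
				   __builtin_return_address(0));
}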
On Fri, Jul 14, 2023 at 04:11:39PM +0100, Matthew Wilcox wrote:
> ... or just don't change vmalloc_huge()?
Yeah, that; everything else just adds more lines without real benefit. I
ended up with the below.
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -152,6 +152,7 @@ extern void *__vmalloc_node_range(unsign
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
+void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3416,6 +3416,13 @@ void *vmalloc(unsigned long size)
}
EXPORT_SYMBOL(vmalloc);
+void *vmalloc_huge_node(unsigned long size, gfp_t gfp_mask, int node)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+ node, __builtin_return_address(0));
+}
+
/**
* vmalloc_huge - allocate virtually contiguous memory, allow huge pages
* @size: allocation size
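With this final version, vmalloc_huge() is left untouched and keeps its own __builtin_return_address(0), while callers that want the node-specific hash-table buckets from the commit message can do something like the following (hypothetical usage sketch; struct bucket and alloc_node_buckets() are made up):

struct bucket {
	struct hlist_head head;
};

static struct bucket *alloc_node_buckets(unsigned int order, int node)
{
	/* Virtually contiguous, huge-page mapped where possible, and
	 * backed by pages from the requested NUMA node. */
	return vmalloc_huge_node((1UL << order) * sizeof(struct bucket),
				 GFP_KERNEL, node);
}

The result is freed with vfree(), like any other vmalloc allocation.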