Move several vm_area_struct members which are rarely or never used
during page fault handling into the last cacheline to better pack
vm_area_struct. As a result vm_area_struct will fit into 3 cachelines
as opposed to 4 cachelines before this change. New vm_area_struct layout:

struct vm_area_struct {
union {
struct {
long unsigned int vm_start; /* 0 8 */
long unsigned int vm_end; /* 8 8 */
}; /* 0 16 */
struct callback_head vm_rcu ; /* 0 16 */
} __attribute__((__aligned__(8))); /* 0 16 */
struct mm_struct * vm_mm; /* 16 8 */
pgprot_t vm_page_prot; /* 24 8 */
union {
const vm_flags_t vm_flags; /* 32 8 */
vm_flags_t __vm_flags; /* 32 8 */
}; /* 32 8 */
bool detached; /* 40 1 */

/* XXX 3 bytes hole, try to pack */

unsigned int vm_lock_seq; /* 44 4 */
struct list_head anon_vma_chain; /* 48 16 */
/* --- cacheline 1 boundary (64 bytes) --- */
struct anon_vma * anon_vma; /* 64 8 */
const struct vm_operations_struct * vm_ops; /* 72 8 */
long unsigned int vm_pgoff; /* 80 8 */
struct file * vm_file; /* 88 8 */
void * vm_private_data; /* 96 8 */
atomic_long_t swap_readahead_info; /* 104 8 */
struct mempolicy * vm_policy; /* 112 8 */

/* XXX 8 bytes hole, try to pack */

/* --- cacheline 2 boundary (128 bytes) --- */
struct vma_lock vm_lock (__aligned__(64)); /* 128 4 */

/* XXX 4 bytes hole, try to pack */

struct {
struct rb_node rb (__aligned__(8)); /* 136 24 */
long unsigned int rb_subtree_last; /* 160 8 */
} __attribute__((__aligned__(8))) shared; /* 136 32 */
struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 168 0 */
/* size: 192, cachelines: 3, members: 17 */
/* sum members: 153, holes: 3, sum holes: 15 */
/* padding: 24 */
/* forced alignments: 3, forced holes: 2, sum forced holes: 12 */
} __attribute__((__aligned__(64)));
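
As an illustration (not part of the patch), the intended placement can be
expressed as compile-time checks along these lines; the asserted bounds
assume the defconfig layout shown above:

/*
 * Hypothetical sanity checks, assuming the defconfig layout above:
 * members touched during page fault handling stay within the first
 * two cachelines and the whole structure fits in three.
 */
#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/stddef.h>
#include <linux/mm_types.h>

static_assert(offsetof(struct vm_area_struct, vm_start) < SMP_CACHE_BYTES);
static_assert(offsetof(struct vm_area_struct, vm_flags) < SMP_CACHE_BYTES);
static_assert(offsetof(struct vm_area_struct, vm_file) < 2 * SMP_CACHE_BYTES);
static_assert(sizeof(struct vm_area_struct) <= 3 * SMP_CACHE_BYTES);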
Memory consumption per 1000 VMAs becomes 48 pages:
slabinfo after vm_area_struct changes:
<name> ... <objsize> <objperslab> <pagesperslab> : ...
vm_area_struct ... 192 42 2 : ...
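
(For reference on the 48 pages figure: with 42 objects per 2-page slab,
1000 VMAs need 1000 / 42 rounded up, i.e. 24 slabs, or 24 * 2 = 48 pages.)
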
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
include/linux/mm_types.h | 37 ++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 789bccc05520..c3755b680911 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -733,16 +733,6 @@ struct vm_area_struct {
unsigned int vm_lock_seq;
#endif
- /*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree.
- *
- */
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
-
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
@@ -762,14 +752,6 @@ struct vm_area_struct {
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
-#ifdef CONFIG_ANON_VMA_NAME
- /*
- * For private and shared anonymous mappings, a pointer to a null
- * terminated string containing the name given to the vma, or NULL if
- * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
- */
- struct anon_vma_name *anon_name;
-#endif
#ifdef CONFIG_SWAP
atomic_long_t swap_readahead_info;
#endif
@@ -782,11 +764,28 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA_BALANCING
struct vma_numab_state *numab_state; /* NUMA Balancing state */
#endif
- struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
#ifdef CONFIG_PER_VMA_LOCK
/* Unstable RCU readers are allowed to read this. */
struct vma_lock vm_lock ____cacheline_aligned_in_smp;
#endif
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ *
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
#ifdef CONFIG_NUMA
--
2.47.0.277.g8800431eea-goog
On 11/11/24 21:55, Suren Baghdasaryan wrote:
> Move several vm_area_struct members which are rarely or never used
> during page fault handling into the last cacheline to better pack
> vm_area_struct. As a result vm_area_struct will fit into 3 cachelines
> as opposed to 4 cachelines before this change. New vm_area_struct layout:
>
> struct vm_area_struct {
> union {
> struct {
> long unsigned int vm_start; /* 0 8 */
> long unsigned int vm_end; /* 8 8 */
> }; /* 0 16 */
> struct callback_head vm_rcu ; /* 0 16 */
> } __attribute__((__aligned__(8))); /* 0 16 */
> struct mm_struct * vm_mm; /* 16 8 */
> pgprot_t vm_page_prot; /* 24 8 */
> union {
> const vm_flags_t vm_flags; /* 32 8 */
> vm_flags_t __vm_flags; /* 32 8 */
> }; /* 32 8 */
> bool detached; /* 40 1 */
>
> /* XXX 3 bytes hole, try to pack */
>
> unsigned int vm_lock_seq; /* 44 4 */
> struct list_head anon_vma_chain; /* 48 16 */
> /* --- cacheline 1 boundary (64 bytes) --- */
> struct anon_vma * anon_vma; /* 64 8 */
> const struct vm_operations_struct * vm_ops; /* 72 8 */
> long unsigned int vm_pgoff; /* 80 8 */
> struct file * vm_file; /* 88 8 */
> void * vm_private_data; /* 96 8 */
> atomic_long_t swap_readahead_info; /* 104 8 */
> struct mempolicy * vm_policy; /* 112 8 */
>
> /* XXX 8 bytes hole, try to pack */
>
> /* --- cacheline 2 boundary (128 bytes) --- */
> struct vma_lock vm_lock (__aligned__(64)); /* 128 4 */
>
> /* XXX 4 bytes hole, try to pack */
>
> struct {
> struct rb_node rb (__aligned__(8)); /* 136 24 */
> long unsigned int rb_subtree_last; /* 160 8 */
> } __attribute__((__aligned__(8))) shared; /* 136 32 */
> struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 168 0 */
I don't see anon_name in the output, I thought it was added for Android? :)
>
> /* size: 192, cachelines: 3, members: 17 */
> /* sum members: 153, holes: 3, sum holes: 15 */
> /* padding: 24 */
Instead you seem to have padding, so an attempt to use SLAB_TYPESAFE_BY_RCU
should be able to use that rather than grow the object up to 256 bytes.
Perhaps this pahole output wasn't generated with a fully representative config?
On Tue, Nov 12, 2024 at 2:07 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 11/11/24 21:55, Suren Baghdasaryan wrote:
> > Move several vm_area_struct members which are rarely or never used
> > during page fault handling into the last cacheline to better pack
> > vm_area_struct. As a result vm_area_struct will fit into 3 cachelines
> > as opposed to 4 cachelines before this change. New vm_area_struct layout:
> >
> > struct vm_area_struct {
> > union {
> > struct {
> > long unsigned int vm_start; /* 0 8 */
> > long unsigned int vm_end; /* 8 8 */
> > }; /* 0 16 */
> > struct callback_head vm_rcu ; /* 0 16 */
> > } __attribute__((__aligned__(8))); /* 0 16 */
> > struct mm_struct * vm_mm; /* 16 8 */
> > pgprot_t vm_page_prot; /* 24 8 */
> > union {
> > const vm_flags_t vm_flags; /* 32 8 */
> > vm_flags_t __vm_flags; /* 32 8 */
> > }; /* 32 8 */
> > bool detached; /* 40 1 */
> >
> > /* XXX 3 bytes hole, try to pack */
> >
> > unsigned int vm_lock_seq; /* 44 4 */
> > struct list_head anon_vma_chain; /* 48 16 */
> > /* --- cacheline 1 boundary (64 bytes) --- */
> > struct anon_vma * anon_vma; /* 64 8 */
> > const struct vm_operations_struct * vm_ops; /* 72 8 */
> > long unsigned int vm_pgoff; /* 80 8 */
> > struct file * vm_file; /* 88 8 */
> > void * vm_private_data; /* 96 8 */
> > atomic_long_t swap_readahead_info; /* 104 8 */
> > struct mempolicy * vm_policy; /* 112 8 */
> >
> > /* XXX 8 bytes hole, try to pack */
> >
> > /* --- cacheline 2 boundary (128 bytes) --- */
> > struct vma_lock vm_lock (__aligned__(64)); /* 128 4 */
> >
> > /* XXX 4 bytes hole, try to pack */
> >
> > struct {
> > struct rb_node rb (__aligned__(8)); /* 136 24 */
> > long unsigned int rb_subtree_last; /* 160 8 */
> > } __attribute__((__aligned__(8))) shared; /* 136 32 */
> > struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 168 0 */
>
> I don't see anon_name in the output, I thought it was added for Android? :)
Yes, this output is generated with defconfig; that's why you see some
holes in this structure. On my x86 machine I have non-zero
vm_userfaultfd_ctx and numab_state, while on Android I have
vm_userfaultfd_ctx and anon_name.
>
> >
> > /* size: 192, cachelines: 3, members: 17 */
> > /* sum members: 153, holes: 3, sum holes: 15 */
> > /* padding: 24 */
>
> Instead you seem to have padding, so an attempt to use SLAB_TYPESAFE_BY_RCU
> should be able to use that rather than grow the object up to 256 bytes.
Yes, thanks for the tip about the SLAB_TYPESAFE_BY_RCU freelist pointer.
In the actual configurations where I saw SLAB_TYPESAFE_BY_RCU causing
this structure to grow, I had less padding at the end.
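
For reference, a minimal sketch (illustrative only, not from this series) of
what switching the vma cache to SLAB_TYPESAFE_BY_RCU could look like, modelled
on the existing vm_area_cachep creation in kernel/fork.c; whether the
out-of-object freelist pointer can reuse the tail padding is exactly the
question raised above:

/*
 * Illustrative sketch, not part of this patch: create the vma cache
 * with SLAB_TYPESAFE_BY_RCU.  The point discussed above is whether the
 * freelist pointer can live in the 24 bytes of tail padding of the
 * 192-byte object instead of pushing its footprint up to 256 bytes.
 */
vm_area_cachep = kmem_cache_create("vm_area_struct",
				   sizeof(struct vm_area_struct), 0,
				   SLAB_TYPESAFE_BY_RCU | SLAB_PANIC | SLAB_ACCOUNT,
				   NULL);
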
> Perhaps this pahole output wasn't generated with a fully representative config?
You are right. I'll replace it with the actual output from my x86
setup (Android probably has a smaller interested audience).