[PATCH v9 12/17] mm: move lesser used vm_area_struct members into the last cacheline

Posted by Suren Baghdasaryan 1 year ago
Move several vm_area_struct members which are rarely or never used
during page fault handling into the last cacheline to better pack
vm_area_struct. As a result vm_area_struct will fit into 3 as opposed
to 4 cachelines. New typical vm_area_struct layout:

struct vm_area_struct {
    union {
        struct {
            long unsigned int vm_start;              /*     0     8 */
            long unsigned int vm_end;                /*     8     8 */
        };                                           /*     0    16 */
        freeptr_t          vm_freeptr;               /*     0     8 */
    };                                               /*     0    16 */
    struct mm_struct *         vm_mm;                /*    16     8 */
    pgprot_t                   vm_page_prot;         /*    24     8 */
    union {
        const vm_flags_t   vm_flags;                 /*    32     8 */
        vm_flags_t         __vm_flags;               /*    32     8 */
    };                                               /*    32     8 */
    unsigned int               vm_lock_seq;          /*    40     4 */

    /* XXX 4 bytes hole, try to pack */

    struct list_head           anon_vma_chain;       /*    48    16 */
    /* --- cacheline 1 boundary (64 bytes) --- */
    struct anon_vma *          anon_vma;             /*    64     8 */
    const struct vm_operations_struct  * vm_ops;     /*    72     8 */
    long unsigned int          vm_pgoff;             /*    80     8 */
    struct file *              vm_file;              /*    88     8 */
    void *                     vm_private_data;      /*    96     8 */
    atomic_long_t              swap_readahead_info;  /*   104     8 */
    struct mempolicy *         vm_policy;            /*   112     8 */
    struct vma_numab_state *   numab_state;          /*   120     8 */
    /* --- cacheline 2 boundary (128 bytes) --- */
    refcount_t          vm_refcnt (__aligned__(64)); /*   128     4 */

    /* XXX 4 bytes hole, try to pack */

    struct {
        struct rb_node     rb (__aligned__(8));      /*   136    24 */
        long unsigned int  rb_subtree_last;          /*   160     8 */
    } __attribute__((__aligned__(8))) shared;        /*   136    32 */
    struct anon_vma_name *     anon_name;            /*   168     8 */
    struct vm_userfaultfd_ctx  vm_userfaultfd_ctx;   /*   176     8 */

    /* size: 192, cachelines: 3, members: 18 */
    /* sum members: 176, holes: 2, sum holes: 8 */
    /* padding: 8 */
    /* forced alignments: 2, forced holes: 1, sum forced holes: 4 */
} __attribute__((__aligned__(64)));
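
For configs matching the dump above (CONFIG_PER_VMA_LOCK=y, no lockdep,
64-byte cachelines), a hypothetical build-time guard, not part of this
patch, could catch the structure growing back past three cachelines. It
would live in a C file that already pulls in the mm headers:

/*
 * Hypothetical guard, not part of this patch: on a config matching the
 * pahole dump above, the reordered vm_area_struct stays within three
 * 64-byte cachelines.
 */
#include <linux/build_bug.h>	/* static_assert() */
#include <linux/cache.h>	/* L1_CACHE_BYTES */
#include <linux/mm_types.h>

static_assert(sizeof(struct vm_area_struct) <= 3 * L1_CACHE_BYTES,
	      "vm_area_struct no longer fits in 3 cachelines");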

Memory consumption per 1000 VMAs becomes 48 pages:

    slabinfo after vm_area_struct changes:
     <name>           ... <objsize> <objperslab> <pagesperslab> : ...
     vm_area_struct   ...    192   42    2 : ...
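
The 48-page figure follows from the slab geometry above; a stand-alone
sketch of the arithmetic (plain userspace C, not kernel code):

/*
 * 42 objects of 192 bytes fit in one 2-page (8192-byte) slab
 * (42 * 192 = 8064 <= 8192), so 1000 VMAs need ceil(1000 / 42) = 24
 * slabs, i.e. 24 * 2 = 48 pages.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int objperslab = 42;	/* from the slabinfo line above */
	const unsigned int pagesperslab = 2;
	const unsigned int vmas = 1000;
	const unsigned int slabs = (vmas + objperslab - 1) / objperslab;

	printf("%u VMAs -> %u slabs -> %u pages\n",
	       vmas, slabs, slabs * pagesperslab);	/* 1000 -> 24 -> 48 */
	return 0;
}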

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/mm_types.h | 38 ++++++++++++++++++--------------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9228d19662c6..d902e6730654 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -725,17 +725,6 @@ struct vm_area_struct {
 	 */
 	unsigned int vm_lock_seq;
 #endif
-
-	/*
-	 * For areas with an address space and backing store,
-	 * linkage into the address_space->i_mmap interval tree.
-	 *
-	 */
-	struct {
-		struct rb_node rb;
-		unsigned long rb_subtree_last;
-	} shared;
-
 	/*
 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
 	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
@@ -755,14 +744,6 @@ struct vm_area_struct {
 	struct file * vm_file;		/* File we map to (can be NULL). */
 	void * vm_private_data;		/* was vm_pte (shared mem) */
 
-#ifdef CONFIG_ANON_VMA_NAME
-	/*
-	 * For private and shared anonymous mappings, a pointer to a null
-	 * terminated string containing the name given to the vma, or NULL if
-	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
-	 */
-	struct anon_vma_name *anon_name;
-#endif
 #ifdef CONFIG_SWAP
 	atomic_long_t swap_readahead_info;
 #endif
@@ -775,7 +756,6 @@ struct vm_area_struct {
 #ifdef CONFIG_NUMA_BALANCING
 	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
 #endif
-	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 #ifdef CONFIG_PER_VMA_LOCK
 	/* Unstable RCU readers are allowed to read this. */
 	refcount_t vm_refcnt ____cacheline_aligned_in_smp;
@@ -783,6 +763,24 @@ struct vm_area_struct {
 	struct lockdep_map vmlock_dep_map;
 #endif
 #endif
+	/*
+	 * For areas with an address space and backing store,
+	 * linkage into the address_space->i_mmap interval tree.
+	 *
+	 */
+	struct {
+		struct rb_node rb;
+		unsigned long rb_subtree_last;
+	} shared;
+#ifdef CONFIG_ANON_VMA_NAME
+	/*
+	 * For private and shared anonymous mappings, a pointer to a null
+	 * terminated string containing the name given to the vma, or NULL if
+	 * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+	 */
+	struct anon_vma_name *anon_name;
+#endif
+	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
 
 #ifdef CONFIG_NUMA
-- 
2.47.1.613.gc27f4b7a9f-goog
Re: [PATCH v9 12/17] mm: move lesser used vm_area_struct members into the last cacheline
Posted by Peter Zijlstra 1 year ago
On Fri, Jan 10, 2025 at 08:25:59PM -0800, Suren Baghdasaryan wrote:
> Move several vm_area_struct members which are rarely or never used
> during page fault handling into the last cacheline to better pack
> vm_area_struct. As a result vm_area_struct will fit into 3 as opposed
> to 4 cachelines. New typical vm_area_struct layout:
> 
> struct vm_area_struct {
>     union {
>         struct {
>             long unsigned int vm_start;              /*     0     8 */
>             long unsigned int vm_end;                /*     8     8 */
>         };                                           /*     0    16 */
>         freeptr_t          vm_freeptr;               /*     0     8 */
>     };                                               /*     0    16 */
>     struct mm_struct *         vm_mm;                /*    16     8 */
>     pgprot_t                   vm_page_prot;         /*    24     8 */
>     union {
>         const vm_flags_t   vm_flags;                 /*    32     8 */
>         vm_flags_t         __vm_flags;               /*    32     8 */
>     };                                               /*    32     8 */
>     unsigned int               vm_lock_seq;          /*    40     4 */

Does it not make sense to move this seq field near the refcnt?

>     /* XXX 4 bytes hole, try to pack */
> 
>     struct list_head           anon_vma_chain;       /*    48    16 */
>     /* --- cacheline 1 boundary (64 bytes) --- */
>     struct anon_vma *          anon_vma;             /*    64     8 */
>     const struct vm_operations_struct  * vm_ops;     /*    72     8 */
>     long unsigned int          vm_pgoff;             /*    80     8 */
>     struct file *              vm_file;              /*    88     8 */
>     void *                     vm_private_data;      /*    96     8 */
>     atomic_long_t              swap_readahead_info;  /*   104     8 */
>     struct mempolicy *         vm_policy;            /*   112     8 */
>     struct vma_numab_state *   numab_state;          /*   120     8 */
>     /* --- cacheline 2 boundary (128 bytes) --- */
>     refcount_t          vm_refcnt (__aligned__(64)); /*   128     4 */
> 
>     /* XXX 4 bytes hole, try to pack */
> 
>     struct {
>         struct rb_node     rb (__aligned__(8));      /*   136    24 */
>         long unsigned int  rb_subtree_last;          /*   160     8 */
>     } __attribute__((__aligned__(8))) shared;        /*   136    32 */
>     struct anon_vma_name *     anon_name;            /*   168     8 */
>     struct vm_userfaultfd_ctx  vm_userfaultfd_ctx;   /*   176     8 */
> 
>     /* size: 192, cachelines: 3, members: 18 */
>     /* sum members: 176, holes: 2, sum holes: 8 */
>     /* padding: 8 */
>     /* forced alignments: 2, forced holes: 1, sum forced holes: 4 */
> } __attribute__((__aligned__(64)));
Re: [PATCH v9 12/17] mm: move lesser used vm_area_struct members into the last cacheline
Posted by Suren Baghdasaryan 1 year ago
On Wed, Jan 15, 2025 at 2:51 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Fri, Jan 10, 2025 at 08:25:59PM -0800, Suren Baghdasaryan wrote:
> > Move several vm_area_struct members which are rarely or never used
> > during page fault handling into the last cacheline to better pack
> > vm_area_struct. As a result vm_area_struct will fit into 3 as opposed
> > to 4 cachelines. New typical vm_area_struct layout:
> >
> > struct vm_area_struct {
> >     union {
> >         struct {
> >             long unsigned int vm_start;              /*     0     8 */
> >             long unsigned int vm_end;                /*     8     8 */
> >         };                                           /*     0    16 */
> >         freeptr_t          vm_freeptr;               /*     0     8 */
> >     };                                               /*     0    16 */
> >     struct mm_struct *         vm_mm;                /*    16     8 */
> >     pgprot_t                   vm_page_prot;         /*    24     8 */
> >     union {
> >         const vm_flags_t   vm_flags;                 /*    32     8 */
> >         vm_flags_t         __vm_flags;               /*    32     8 */
> >     };                                               /*    32     8 */
> >     unsigned int               vm_lock_seq;          /*    40     4 */
>
> Does it not make sense to move this seq field near the refcnt?

In an earlier version, when vm_lock was not a refcount yet, I tried
that and moving vm_lock_seq introduced a regression in the pft test. We
have that early vm_lock_seq check at the beginning of vma_start_read()
and if it fails we bail out early without locking. I think that might
be the reason why keeping vm_lock_seq in the first cacheline is
beneficial. But I'll try moving it again now that we have vm_refcnt
instead of the lock and see if pft still shows any regression.
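
For reference, a stand-alone userspace model of the pattern described
above (struct vma_model, struct mm_model and vma_read_trylock_model()
are made-up names that only mirror the kernel ones; this is not the
actual vma_start_read() code). The write-locked check reads only
vm_lock_seq in the first, already-hot cacheline and bails out before
the refcount cacheline is ever pulled in:

#include <stdatomic.h>
#include <stdbool.h>

struct mm_model {
	atomic_uint mm_lock_seq;		/* bumped when VMAs get write-locked */
};

struct vma_model {
	unsigned long vm_start, vm_end;		/* first cacheline, hot during faults */
	struct mm_model *vm_mm;
	unsigned int vm_lock_seq;		/* first cacheline */
	_Alignas(64) atomic_uint vm_refcnt;	/* last cacheline */
};

static bool vma_read_trylock_model(struct vma_model *vma)
{
	/*
	 * Early check: equal sequence numbers mean a writer holds this
	 * vma, so bail out without touching the vm_refcnt cacheline.
	 */
	if (vma->vm_lock_seq == atomic_load(&vma->vm_mm->mm_lock_seq))
		return false;

	/* Only now pull in the last cacheline and take a reference. */
	unsigned int old = atomic_load(&vma->vm_refcnt);
	do {
		if (old == 0)			/* vma is being freed */
			return false;
	} while (!atomic_compare_exchange_weak(&vma->vm_refcnt, &old, old + 1));

	/* Re-check after taking the reference; drop it if a writer raced in. */
	if (vma->vm_lock_seq == atomic_load(&vma->vm_mm->mm_lock_seq)) {
		atomic_fetch_sub(&vma->vm_refcnt, 1);
		return false;
	}
	return true;
}

The real implementation has more states and uses the kernel refcount_t
API; the model only illustrates the order in which the two cachelines
are touched.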

Re: [PATCH v9 12/17] mm: move lesser used vm_area_struct members into the last cacheline
Posted by Suren Baghdasaryan 11 months, 4 weeks ago
On Wed, Jan 15, 2025 at 8:39 AM Suren Baghdasaryan <surenb@google.com> wrote:
>
> On Wed, Jan 15, 2025 at 2:51 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > On Fri, Jan 10, 2025 at 08:25:59PM -0800, Suren Baghdasaryan wrote:
> > > Move several vm_area_struct members which are rarely or never used
> > > during page fault handling into the last cacheline to better pack
> > > vm_area_struct. As a result vm_area_struct will fit into 3 as opposed
> > > to 4 cachelines. New typical vm_area_struct layout:
> > >
> > > struct vm_area_struct {
> > >     union {
> > >         struct {
> > >             long unsigned int vm_start;              /*     0     8 */
> > >             long unsigned int vm_end;                /*     8     8 */
> > >         };                                           /*     0    16 */
> > >         freeptr_t          vm_freeptr;               /*     0     8 */
> > >     };                                               /*     0    16 */
> > >     struct mm_struct *         vm_mm;                /*    16     8 */
> > >     pgprot_t                   vm_page_prot;         /*    24     8 */
> > >     union {
> > >         const vm_flags_t   vm_flags;                 /*    32     8 */
> > >         vm_flags_t         __vm_flags;               /*    32     8 */
> > >     };                                               /*    32     8 */
> > >     unsigned int               vm_lock_seq;          /*    40     4 */
> >
> > Does it not make sense to move this seq field near the refcnt?
>
> In an earlier version, when vm_lock was not a refcount yet, I tried
> that and moving vm_lock_seq introduced a regression in the pft test. We
> have that early vm_lock_seq check at the beginning of vma_start_read()
> and if it fails we bail out early without locking. I think that might
> be the reason why keeping vm_lock_seq in the first cacheline is
> beneficial. But I'll try moving it again now that we have vm_refcnt
> instead of the lock and see if pft still shows any regression.

I confirmed that moving vm_lock_seq next to vm_refcnt regresses
pagefault performance:

Hmean     faults/cpu-1    508634.6876 (   0.00%)   508548.5498 *  -0.02%*
Hmean     faults/cpu-4    474767.2684 (   0.00%)   475620.7653 *   0.18%*
Hmean     faults/cpu-7    451356.6844 (   0.00%)   446738.2381 *  -1.02%*
Hmean     faults/cpu-12   360114.9092 (   0.00%)   337121.8189 *  -6.38%*
Hmean     faults/cpu-21   227567.8237 (   0.00%)   205277.2029 *  -9.80%*
Hmean     faults/cpu-30   163383.6765 (   0.00%)   152765.1451 *  -6.50%*
Hmean     faults/cpu-48   118048.2568 (   0.00%)   109959.2027 *  -6.85%*
Hmean     faults/cpu-56   103189.6761 (   0.00%)    92989.3749 *  -9.89%*
Hmean     faults/sec-1    508228.4512 (   0.00%)   508129.1963 *  -0.02%*
Hmean     faults/sec-4   1854868.9033 (   0.00%)  1862443.6146 *   0.41%*
Hmean     faults/sec-7   3088881.6158 (   0.00%)  3050403.1664 *  -1.25%*
Hmean     faults/sec-12  4222540.9948 (   0.00%)  3951163.9557 *  -6.43%*
Hmean     faults/sec-21  4555777.5386 (   0.00%)  4130470.6021 *  -9.34%*
Hmean     faults/sec-30  4336721.3467 (   0.00%)  4150477.5095 *  -4.29%*
Hmean     faults/sec-48  5163921.7465 (   0.00%)  4857286.2166 *  -5.94%*
Hmean     faults/sec-56  5413622.8890 (   0.00%)  4936484.0021 *  -8.81%*

So, I kept it unchanged in v10
(https://lore.kernel.org/all/20250213224655.1680278-14-surenb@google.com/)

Re: [PATCH v9 12/17] mm: move lesser used vm_area_struct members into the last cacheline
Posted by Lorenzo Stoakes 1 year ago
On Fri, Jan 10, 2025 at 08:25:59PM -0800, Suren Baghdasaryan wrote:
> Move several vm_area_struct members which are rarely or never used
> during page fault handling into the last cacheline to better pack
> vm_area_struct. As a result vm_area_struct will fit into 3 as opposed
> to 4 cachelines. New typical vm_area_struct layout:
>
> struct vm_area_struct {
>     union {
>         struct {
>             long unsigned int vm_start;              /*     0     8 */
>             long unsigned int vm_end;                /*     8     8 */
>         };                                           /*     0    16 */
>         freeptr_t          vm_freeptr;               /*     0     8 */
>     };                                               /*     0    16 */
>     struct mm_struct *         vm_mm;                /*    16     8 */
>     pgprot_t                   vm_page_prot;         /*    24     8 */
>     union {
>         const vm_flags_t   vm_flags;                 /*    32     8 */
>         vm_flags_t         __vm_flags;               /*    32     8 */
>     };                                               /*    32     8 */
>     unsigned int               vm_lock_seq;          /*    40     4 */
>
>     /* XXX 4 bytes hole, try to pack */
>
>     struct list_head           anon_vma_chain;       /*    48    16 */
>     /* --- cacheline 1 boundary (64 bytes) --- */
>     struct anon_vma *          anon_vma;             /*    64     8 */
>     const struct vm_operations_struct  * vm_ops;     /*    72     8 */
>     long unsigned int          vm_pgoff;             /*    80     8 */
>     struct file *              vm_file;              /*    88     8 */
>     void *                     vm_private_data;      /*    96     8 */
>     atomic_long_t              swap_readahead_info;  /*   104     8 */
>     struct mempolicy *         vm_policy;            /*   112     8 */
>     struct vma_numab_state *   numab_state;          /*   120     8 */
>     /* --- cacheline 2 boundary (128 bytes) --- */
>     refcount_t          vm_refcnt (__aligned__(64)); /*   128     4 */
>
>     /* XXX 4 bytes hole, try to pack */
>
>     struct {
>         struct rb_node     rb (__aligned__(8));      /*   136    24 */
>         long unsigned int  rb_subtree_last;          /*   160     8 */
>     } __attribute__((__aligned__(8))) shared;        /*   136    32 */
>     struct anon_vma_name *     anon_name;            /*   168     8 */
>     struct vm_userfaultfd_ctx  vm_userfaultfd_ctx;   /*   176     8 */
>
>     /* size: 192, cachelines: 3, members: 18 */
>     /* sum members: 176, holes: 2, sum holes: 8 */
>     /* padding: 8 */
>     /* forced alignments: 2, forced holes: 1, sum forced holes: 4 */
> } __attribute__((__aligned__(64)));
>
> Memory consumption per 1000 VMAs becomes 48 pages:
>
>     slabinfo after vm_area_struct changes:
>      <name>           ... <objsize> <objperslab> <pagesperslab> : ...
>      vm_area_struct   ...    192   42    2 : ...
>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>

Looks sensible to me:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
