[PATCH v3 10/13] mm: zswap: Add a per-cpu "acomp_batch_ctx" to struct zswap_pool.

Posted by Kanchana P Sridhar 2 weeks, 3 days ago
This patch adds a separate per-cpu batching acomp context "acomp_batch_ctx"
to the zswap_pool. The per-cpu acomp_batch_ctx pointer is allocated at pool
creation time, but no per-cpu resources are allocated for it.

The idea is to not incur the memory footprint cost of multiple acomp_reqs
and buffers in the existing "acomp_ctx" for cases where compress batching
is not possible; for instance, with software compression algorithms, on
systems without IAA, or on systems with IAA that want to run the existing
non-batching implementation of zswap_store() for large folios.

By creating a separate acomp_batch_ctx, we have the ability to allocate
additional memory per-cpu only if the zswap compressor supports batching,
and if the user wants to enable the use of compress batching in
zswap_store() to improve swapout performance of large folios.
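
For illustration, the intended flow is roughly the sketch below. This is
not code from this patch: zswap_batching_enabled() and ZSWAP_MAX_BATCH_SIZE
are hypothetical placeholders for the opt-in knob and batch size handled
elsewhere in this series, while zswap_create_acomp_ctx() is the helper the
series already uses:

	/*
	 * Sketch only: the resources behind acomp_batch_ctx are created
	 * lazily, so pools that never batch pay only for the per-CPU
	 * pointer itself.
	 */
	static int zswap_setup_batch_ctx(struct zswap_pool *pool, unsigned int cpu)
	{
		struct crypto_acomp_ctx *acomp_ctx;

		/* No batching support, or user opted out: keep the 1-req acomp_ctx. */
		if (!pool->acomp_batch_ctx || !zswap_batching_enabled(pool))
			return 0;

		acomp_ctx = per_cpu_ptr(pool->acomp_batch_ctx, cpu);

		/* Allocate the multiple acomp_reqs/buffers only on this path. */
		return zswap_create_acomp_ctx(cpu, acomp_ctx, pool->tfm_name,
					      ZSWAP_MAX_BATCH_SIZE);
	}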

Suggested-by: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
 mm/zswap.c | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 02e031122fdf..80a928cf0f7e 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -160,6 +160,7 @@ struct crypto_acomp_ctx {
 struct zswap_pool {
 	struct zpool *zpool;
 	struct crypto_acomp_ctx __percpu *acomp_ctx;
+	struct crypto_acomp_ctx __percpu *acomp_batch_ctx;
 	struct percpu_ref ref;
 	struct list_head list;
 	struct work_struct release_work;
@@ -287,10 +288,14 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 
 	pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
 	if (!pool->acomp_ctx) {
-		pr_err("percpu alloc failed\n");
+		pr_err("percpu acomp_ctx alloc failed\n");
 		goto error;
 	}
 
+	pool->acomp_batch_ctx = alloc_percpu(*pool->acomp_batch_ctx);
+	if (!pool->acomp_batch_ctx)
+		pr_err("percpu acomp_batch_ctx alloc failed\n");
+
 	ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
 				       &pool->node);
 	if (ret)
@@ -312,6 +317,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 ref_fail:
 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 error:
+	if (pool->acomp_batch_ctx)
+		free_percpu(pool->acomp_batch_ctx);
 	if (pool->acomp_ctx)
 		free_percpu(pool->acomp_ctx);
 	if (pool->zpool)
@@ -368,6 +375,8 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
 
 	cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
 	free_percpu(pool->acomp_ctx);
+	if (pool->acomp_batch_ctx)
+		free_percpu(pool->acomp_batch_ctx);
 
 	zpool_destroy_pool(pool->zpool);
 	kfree(pool);
@@ -930,6 +939,11 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx;
 
+	if (pool->acomp_batch_ctx) {
+		acomp_ctx = per_cpu_ptr(pool->acomp_batch_ctx, cpu);
+		acomp_ctx->nr_reqs = 0;
+	}
+
 	acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 	return zswap_create_acomp_ctx(cpu, acomp_ctx, pool->tfm_name, 1);
 }
@@ -939,6 +953,12 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx;
 
+	if (pool->acomp_batch_ctx) {
+		acomp_ctx = per_cpu_ptr(pool->acomp_batch_ctx, cpu);
+		if (!IS_ERR_OR_NULL(acomp_ctx) && (acomp_ctx->nr_reqs > 0))
+			zswap_delete_acomp_ctx(acomp_ctx);
+	}
+
 	acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
 	zswap_delete_acomp_ctx(acomp_ctx);
 
-- 
2.27.0
Re: [PATCH v3 10/13] mm: zswap: Add a per-cpu "acomp_batch_ctx" to struct zswap_pool.
Posted by Yosry Ahmed 2 weeks, 1 day ago
On Wed, Nov 6, 2024 at 11:21 AM Kanchana P Sridhar
<kanchana.p.sridhar@intel.com> wrote:
>
> This patch adds a separate per-cpu batching acomp context "acomp_batch_ctx"
> to the zswap_pool. The per-cpu acomp_batch_ctx pointer is allocated at pool
> creation time, but no per-cpu resources are allocated for it.
>
> The idea is to not incur the memory footprint cost of multiple acomp_reqs
> and buffers in the existing "acomp_ctx" for cases where compress batching
> is not possible; for instance, with software compression algorithms, on
> systems without IAA, or on systems with IAA that want to run the existing
> non-batching implementation of zswap_store() for large folios.
>
> By creating a separate acomp_batch_ctx, we have the ability to allocate
> additional memory per-cpu only if the zswap compressor supports batching,
> and if the user wants to enable the use of compress batching in
> zswap_store() to improve swapout performance of large folios.
>
> Suggested-by: Yosry Ahmed <yosryahmed@google.com>

This isn't needed if the sysctl is removed and we just allocate the
number of buffers during pool initialization, right? Same for the next
patch.
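
A rough sketch of that simplification, assuming a hypothetical
pool->nr_reqs field fixed at pool creation (however the batch size ends up
being decided; none of this is settled API):

	static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
	{
		struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
		struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);

		/* One context sized once up front; no separate acomp_batch_ctx. */
		return zswap_create_acomp_ctx(cpu, acomp_ctx, pool->tfm_name,
					      pool->nr_reqs);
	}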


> [...]
RE: [PATCH v3 10/13] mm: zswap: Add a per-cpu "acomp_batch_ctx" to struct zswap_pool.
Posted by Sridhar, Kanchana P 2 weeks, 1 day ago
> -----Original Message-----
> From: Yosry Ahmed <yosryahmed@google.com>
> Sent: Friday, November 8, 2024 12:23 PM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; nphamcs@gmail.com; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; ryan.roberts@arm.com; Huang, Ying
> <ying.huang@intel.com>; 21cnbao@gmail.com; akpm@linux-foundation.org;
> linux-crypto@vger.kernel.org; herbert@gondor.apana.org.au;
> davem@davemloft.net; clabbe@baylibre.com; ardb@kernel.org;
> ebiggers@google.com; surenb@google.com; Accardi, Kristen C
> <kristen.c.accardi@intel.com>; zanussi@kernel.org; Feghali, Wajdi K
> <wajdi.k.feghali@intel.com>; Gopal, Vinodh <vinodh.gopal@intel.com>
> Subject: Re: [PATCH v3 10/13] mm: zswap: Add a per-cpu "acomp_batch_ctx"
> to struct zswap_pool.
> 
> On Wed, Nov 6, 2024 at 11:21 AM Kanchana P Sridhar
> <kanchana.p.sridhar@intel.com> wrote:
> >
> > This patch adds a separate per-cpu batching acomp context "acomp_batch_ctx"
> > to the zswap_pool. The per-cpu acomp_batch_ctx pointer is allocated at pool
> > creation time, but no per-cpu resources are allocated for it.
> >
> > The idea is to not incur the memory footprint cost of multiple acomp_reqs
> > and buffers in the existing "acomp_ctx" for cases where compress batching
> > is not possible; for instance, with software compression algorithms, on
> > systems without IAA, or on systems with IAA that want to run the existing
> > non-batching implementation of zswap_store() for large folios.
> >
> > By creating a separate acomp_batch_ctx, we have the ability to allocate
> > additional memory per-cpu only if the zswap compressor supports batching,
> > and if the user wants to enable the use of compress batching in
> > zswap_store() to improve swapout performance of large folios.
> >
> > Suggested-by: Yosry Ahmed <yosryahmed@google.com>
> 
> This isn't needed if the sysctl is removed and we just allocate the
> number of buffers during pool initialization, right? Same for the next
> patch.

That's correct.

Thanks,
Kanchana

> [...]