zswap_compress_folio() is modified to check whether the pool's acomp_ctx
has "nr_reqs" greater than one, which will be the case if the CPU onlining
code has allocated multiple batching resources in the acomp_ctx. If so,
compress batching can be used with a batch size of "acomp_ctx->nr_reqs".

If compress batching can be used, zswap_compress_folio() invokes the newly
added zswap_batch_compress() procedure to compress and store the folio in
batches of "acomp_ctx->nr_reqs" pages.

With Intel IAA, the iaa_crypto driver will compress each batch of pages in
parallel in hardware.

Hence, zswap_batch_compress() performs the same computations for a batch
that zswap_compress() performs for a single page, and returns true if the
batch was successfully compressed and stored, false otherwise.

If the pool does not support compress batching, or the folio has only one
page, zswap_compress_folio() calls zswap_compress() for each individual
page in the folio, as before.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
mm/zswap.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 113 insertions(+), 9 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 6563d12e907b..f1cba77eda62 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -985,10 +985,11 @@ static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
mutex_unlock(&acomp_ctx->mutex);
}

+/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
- struct zswap_pool *pool)
+ struct zswap_pool *pool,
+ struct crypto_acomp_ctx *acomp_ctx)
{
- struct crypto_acomp_ctx *acomp_ctx;
struct scatterlist input, output;
int comp_ret = 0, alloc_ret = 0;
unsigned int dlen = PAGE_SIZE;
@@ -998,7 +999,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
gfp_t gfp;
u8 *dst;

- acomp_ctx = acomp_ctx_get_cpu_lock(pool);
dst = acomp_ctx->buffers[0];
sg_init_table(&input, 1);
sg_set_page(&input, page, PAGE_SIZE, 0);
@@ -1051,7 +1051,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
else if (alloc_ret)
zswap_reject_alloc_fail++;

- acomp_ctx_put_unlock(acomp_ctx);
return comp_ret == 0 && alloc_ret == 0;
}

@@ -1509,20 +1508,125 @@ static void shrink_worker(struct work_struct *w)
* main API
**********************************/

+/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
+static bool zswap_batch_compress(struct folio *folio,
+ long index,
+ unsigned int batch_size,
+ struct zswap_entry *entries[],
+ struct zswap_pool *pool,
+ struct crypto_acomp_ctx *acomp_ctx)
+{
+ int comp_errors[ZSWAP_MAX_BATCH_SIZE] = { 0 };
+ unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
+ struct page *pages[ZSWAP_MAX_BATCH_SIZE];
+ unsigned int i, nr_batch_pages;
+ bool ret = true;
+
+ nr_batch_pages = min((unsigned int)(folio_nr_pages(folio) - index), batch_size);
+
+ for (i = 0; i < nr_batch_pages; ++i) {
+ pages[i] = folio_page(folio, index + i);
+ dlens[i] = PAGE_SIZE;
+ }
+
+ /*
+ * Batch compress @nr_batch_pages. If IAA is the compressor, the
+ * hardware will compress @nr_batch_pages in parallel.
+ */
+ ret = crypto_acomp_batch_compress(
+ acomp_ctx->reqs,
+ NULL,
+ pages,
+ acomp_ctx->buffers,
+ dlens,
+ comp_errors,
+ nr_batch_pages);
+
+ if (ret) {
+ /*
+ * All batch pages were successfully compressed.
+ * Store the pages in zpool.
+ */
+ struct zpool *zpool = pool->zpool;
+ gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+
+ if (zpool_malloc_support_movable(zpool))
+ gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+
+ for (i = 0; i < nr_batch_pages; ++i) {
+ unsigned long handle;
+ char *buf;
+ int err;
+
+ err = zpool_malloc(zpool, dlens[i], gfp, &handle);
+
+ if (err) {
+ if (err == -ENOSPC)
+ zswap_reject_compress_poor++;
+ else
+ zswap_reject_alloc_fail++;
+
+ ret = false;
+ break;
+ }
+
+ buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
+ memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
+ zpool_unmap_handle(zpool, handle);
+
+ entries[i]->handle = handle;
+ entries[i]->length = dlens[i];
+ }
+ } else {
+ /* Some batch pages had compression errors. */
+ for (i = 0; i < nr_batch_pages; ++i) {
+ if (comp_errors[i]) {
+ if (comp_errors[i] == -ENOSPC)
+ zswap_reject_compress_poor++;
+ else
+ zswap_reject_compress_fail++;
+ }
+ }
+ }
+
+ return ret;
+}
+
static bool zswap_compress_folio(struct folio *folio,
struct zswap_entry *entries[],
struct zswap_pool *pool)
{
long index, nr_pages = folio_nr_pages(folio);
+ struct crypto_acomp_ctx *acomp_ctx;
+ unsigned int batch_size;
+ bool ret = true;

- for (index = 0; index < nr_pages; ++index) {
- struct page *page = folio_page(folio, index);
+ acomp_ctx = acomp_ctx_get_cpu_lock(pool);
+ batch_size = acomp_ctx->nr_reqs;
+
+ if ((batch_size > 1) && (nr_pages > 1)) {
+ for (index = 0; index < nr_pages; index += batch_size) {
+
+ if (!zswap_batch_compress(folio, index, batch_size,
+ &entries[index], pool, acomp_ctx)) {
+ ret = false;
+ goto unlock_acomp_ctx;
+ }
+ }
+ } else {
+ for (index = 0; index < nr_pages; ++index) {
+ struct page *page = folio_page(folio, index);

- if (!zswap_compress(page, entries[index], pool))
- return false;
+ if (!zswap_compress(page, entries[index], pool, acomp_ctx)) {
+ ret = false;
+ goto unlock_acomp_ctx;
+ }
+ }
}

- return true;
+unlock_acomp_ctx:
+ acomp_ctx_put_unlock(acomp_ctx);
+ return ret;
}

/*
--
2.27.0

On Wed, Feb 05, 2025 at 11:21:01PM -0800, Kanchana P Sridhar wrote:
> zswap_compress_folio() is modified to check whether the pool's acomp_ctx
> has "nr_reqs" greater than one, which will be the case if the CPU onlining
> code has allocated multiple batching resources in the acomp_ctx. If so,
> compress batching can be used with a batch size of "acomp_ctx->nr_reqs".
>
> If compress batching can be used, zswap_compress_folio() invokes the newly
> added zswap_batch_compress() procedure to compress and store the folio in
> batches of "acomp_ctx->nr_reqs" pages.
>
> With Intel IAA, the iaa_crypto driver will compress each batch of pages in
> parallel in hardware.
>
> Hence, zswap_batch_compress() performs the same computations for a batch
> that zswap_compress() performs for a single page, and returns true if the
> batch was successfully compressed and stored, false otherwise.
>
> If the pool does not support compress batching, or the folio has only one
> page, zswap_compress_folio() calls zswap_compress() for each individual
> page in the folio, as before.
>
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> ---
> mm/zswap.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++----
> 1 file changed, 113 insertions(+), 9 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index 6563d12e907b..f1cba77eda62 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -985,10 +985,11 @@ static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
> mutex_unlock(&acomp_ctx->mutex);
> }
>
> +/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */

Please use lockdep assertions rather than comments for internal locking rules.
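
For example, a minimal sketch, using the field names in this patch, at the
top of zswap_compress():

	lockdep_assert_held(&acomp_ctx->mutex);

lockdep_assert_held() compiles away when lockdep is disabled, so the
assertion costs nothing in production builds.
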
> static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> - struct zswap_pool *pool)
> + struct zswap_pool *pool,
> + struct crypto_acomp_ctx *acomp_ctx)
> {
> - struct crypto_acomp_ctx *acomp_ctx;
> struct scatterlist input, output;
> int comp_ret = 0, alloc_ret = 0;
> unsigned int dlen = PAGE_SIZE;
> @@ -998,7 +999,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> gfp_t gfp;
> u8 *dst;
>
> - acomp_ctx = acomp_ctx_get_cpu_lock(pool);
> dst = acomp_ctx->buffers[0];
> sg_init_table(&input, 1);
> sg_set_page(&input, page, PAGE_SIZE, 0);
> @@ -1051,7 +1051,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> else if (alloc_ret)
> zswap_reject_alloc_fail++;
>
> - acomp_ctx_put_unlock(acomp_ctx);
> return comp_ret == 0 && alloc_ret == 0;
> }
>
> @@ -1509,20 +1508,125 @@ static void shrink_worker(struct work_struct *w)
> * main API
> **********************************/
>
> +/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
> +static bool zswap_batch_compress(struct folio *folio,
> + long index,
> + unsigned int batch_size,
> + struct zswap_entry *entries[],
> + struct zswap_pool *pool,
> + struct crypto_acomp_ctx *acomp_ctx)
> +{
> + int comp_errors[ZSWAP_MAX_BATCH_SIZE] = { 0 };
> + unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
> + struct page *pages[ZSWAP_MAX_BATCH_SIZE];
> + unsigned int i, nr_batch_pages;
> + bool ret = true;
> +
> + nr_batch_pages = min((unsigned int)(folio_nr_pages(folio) - index), batch_size);
> +
> + for (i = 0; i < nr_batch_pages; ++i) {
> + pages[i] = folio_page(folio, index + i);
> + dlens[i] = PAGE_SIZE;
> + }
> +
> + /*
> + * Batch compress @nr_batch_pages. If IAA is the compressor, the
> + * hardware will compress @nr_batch_pages in parallel.
> + */

Please do not specifically mention IAA in zswap.c, as batching could be
supported in the future by other compressors.
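
E.g., reworded along these lines (just a sketch):

	/*
	 * Batch compress @nr_batch_pages. A compressor that supports
	 * batching can compress these pages in parallel.
	 */
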
> + ret = crypto_acomp_batch_compress(
> + acomp_ctx->reqs,
> + NULL,
> + pages,
> + acomp_ctx->buffers,
> + dlens,
> + comp_errors,
> + nr_batch_pages);

Does crypto_acomp_batch_compress() not require calling
crypto_wait_req()?

> +
> + if (ret) {
> + /*
> + * All batch pages were successfully compressed.
> + * Store the pages in zpool.
> + */
> + struct zpool *zpool = pool->zpool;
> + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> +
> + if (zpool_malloc_support_movable(zpool))
> + gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> +
> + for (i = 0; i < nr_batch_pages; ++i) {
> + unsigned long handle;
> + char *buf;
> + int err;
> +
> + err = zpool_malloc(zpool, dlens[i], gfp, &handle);
> +
> + if (err) {
> + if (err == -ENOSPC)
> + zswap_reject_compress_poor++;
> + else
> + zswap_reject_alloc_fail++;
> +
> + ret = false;
> + break;
> + }
> +
> + buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
> + memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
> + zpool_unmap_handle(zpool, handle);
> +
> + entries[i]->handle = handle;
> + entries[i]->length = dlens[i];
> + }
> + } else {
> + /* Some batch pages had compression errors. */
> + for (i = 0; i < nr_batch_pages; ++i) {
> + if (comp_errors[i]) {
> + if (comp_errors[i] == -ENOSPC)
> + zswap_reject_compress_poor++;
> + else
> + zswap_reject_compress_fail++;
> + }
> + }
> + }

This function is awfully close to zswap_compress(). It's essentially a
vectorized version that uses crypto_acomp_batch_compress() instead of
crypto_acomp_compress().

My questions are:
- Can we use crypto_acomp_batch_compress() for the non-batched case as
  well to unify the code? Does it cause any regressions?

- If we have to use different compression APIs, can we at least reuse
  the rest of the code? We can abstract the compression call into a
  helper that chooses the appropriate API based on the batch size. The
  rest should be the same AFAICT.
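
Something like the following, roughly (an untested sketch;
zswap_compress_one() is a hypothetical helper holding the existing
crypto_acomp_compress() path factored out of zswap_compress()):

	/* Pick the compression API; callers share the zpool/store code. */
	static bool zswap_compress_pages(struct crypto_acomp_ctx *acomp_ctx,
					 struct page *pages[],
					 unsigned int dlens[],
					 int comp_errors[], unsigned int nr)
	{
		if (nr > 1)
			return crypto_acomp_batch_compress(acomp_ctx->reqs,
					NULL, pages, acomp_ctx->buffers,
					dlens, comp_errors, nr);

		/* Single page: use the existing non-batched call. */
		comp_errors[0] = zswap_compress_one(acomp_ctx, pages[0],
						    &dlens[0]);
		return !comp_errors[0];
	}

The zpool_malloc()/memcpy()/error-accounting code after the call should
then be shareable between the two paths.
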
> +
> + return ret;
> +}
> +
> static bool zswap_compress_folio(struct folio *folio,
> struct zswap_entry *entries[],
> struct zswap_pool *pool)
> {
> long index, nr_pages = folio_nr_pages(folio);
> + struct crypto_acomp_ctx *acomp_ctx;
> + unsigned int batch_size;
> + bool ret = true;
>
> - for (index = 0; index < nr_pages; ++index) {
> - struct page *page = folio_page(folio, index);
> + acomp_ctx = acomp_ctx_get_cpu_lock(pool);
> + batch_size = acomp_ctx->nr_reqs;
> +
> + if ((batch_size > 1) && (nr_pages > 1)) {
> + for (index = 0; index < nr_pages; index += batch_size) {
> +
> + if (!zswap_batch_compress(folio, index, batch_size,
> + &entries[index], pool, acomp_ctx)) {
> + ret = false;
> + goto unlock_acomp_ctx;
> + }
> + }
> + } else {
> + for (index = 0; index < nr_pages; ++index) {
> + struct page *page = folio_page(folio, index);
>
> - if (!zswap_compress(page, entries[index], pool))
> - return false;
> + if (!zswap_compress(page, entries[index], pool, acomp_ctx)) {
> + ret = false;
> + goto unlock_acomp_ctx;
> + }
> + }
> }
>
> - return true;
> +unlock_acomp_ctx:
> + acomp_ctx_put_unlock(acomp_ctx);
> + return ret;
> }
>
> /*
> --
> 2.27.0
>

> -----Original Message-----
> From: Yosry Ahmed <yosry.ahmed@linux.dev>
> Sent: Thursday, February 6, 2025 11:11 AM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; nphamcs@gmail.com; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; ryan.roberts@arm.com; 21cnbao@gmail.com;
> akpm@linux-foundation.org; linux-crypto@vger.kernel.org;
> herbert@gondor.apana.org.au; davem@davemloft.net;
> clabbe@baylibre.com; ardb@kernel.org; ebiggers@google.com;
> surenb@google.com; Accardi, Kristen C <kristen.c.accardi@intel.com>;
> Feghali, Wajdi K <wajdi.k.feghali@intel.com>; Gopal, Vinodh
> <vinodh.gopal@intel.com>
> Subject: Re: [PATCH v6 15/16] mm: zswap: Compress batching with Intel IAA
> in zswap_store() of large folios.
>
> On Wed, Feb 05, 2025 at 11:21:01PM -0800, Kanchana P Sridhar wrote:
> > zswap_compress_folio() is modified to check whether the pool's acomp_ctx
> > has "nr_reqs" greater than one, which will be the case if the CPU onlining
> > code has allocated multiple batching resources in the acomp_ctx. If so,
> > compress batching can be used with a batch size of "acomp_ctx->nr_reqs".
> >
> > If compress batching can be used, zswap_compress_folio() invokes the newly
> > added zswap_batch_compress() procedure to compress and store the folio in
> > batches of "acomp_ctx->nr_reqs" pages.
> >
> > With Intel IAA, the iaa_crypto driver will compress each batch of pages in
> > parallel in hardware.
> >
> > Hence, zswap_batch_compress() performs the same computations for a batch
> > that zswap_compress() performs for a single page, and returns true if the
> > batch was successfully compressed and stored, false otherwise.
> >
> > If the pool does not support compress batching, or the folio has only one
> > page, zswap_compress_folio() calls zswap_compress() for each individual
> > page in the folio, as before.
> >
> > Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
> > ---
> > mm/zswap.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++----
> > 1 file changed, 113 insertions(+), 9 deletions(-)
> >
> > diff --git a/mm/zswap.c b/mm/zswap.c
> > index 6563d12e907b..f1cba77eda62 100644
> > --- a/mm/zswap.c
> > +++ b/mm/zswap.c
> > @@ -985,10 +985,11 @@ static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
> > mutex_unlock(&acomp_ctx->mutex);
> > }
> >
> > +/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
>
> Please use lockdep assertions rather than comments for internal locking rules.

Sure. Thanks for the suggestion.
>
> > static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > - struct zswap_pool *pool)
> > + struct zswap_pool *pool,
> > + struct crypto_acomp_ctx *acomp_ctx)
> > {
> > - struct crypto_acomp_ctx *acomp_ctx;
> > struct scatterlist input, output;
> > int comp_ret = 0, alloc_ret = 0;
> > unsigned int dlen = PAGE_SIZE;
> > @@ -998,7 +999,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > gfp_t gfp;
> > u8 *dst;
> >
> > - acomp_ctx = acomp_ctx_get_cpu_lock(pool);
> > dst = acomp_ctx->buffers[0];
> > sg_init_table(&input, 1);
> > sg_set_page(&input, page, PAGE_SIZE, 0);
> > @@ -1051,7 +1051,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
> > else if (alloc_ret)
> > zswap_reject_alloc_fail++;
> >
> > - acomp_ctx_put_unlock(acomp_ctx);
> > return comp_ret == 0 && alloc_ret == 0;
> > }
> >
> > @@ -1509,20 +1508,125 @@ static void shrink_worker(struct work_struct *w)
> > * main API
> > **********************************/
> >
> > +/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
> > +static bool zswap_batch_compress(struct folio *folio,
> > + long index,
> > + unsigned int batch_size,
> > + struct zswap_entry *entries[],
> > + struct zswap_pool *pool,
> > + struct crypto_acomp_ctx *acomp_ctx)
> > +{
> > + int comp_errors[ZSWAP_MAX_BATCH_SIZE] = { 0 };
> > + unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
> > + struct page *pages[ZSWAP_MAX_BATCH_SIZE];
> > + unsigned int i, nr_batch_pages;
> > + bool ret = true;
> > +
> > + nr_batch_pages = min((unsigned int)(folio_nr_pages(folio) - index), batch_size);
> > +
> > + for (i = 0; i < nr_batch_pages; ++i) {
> > + pages[i] = folio_page(folio, index + i);
> > + dlens[i] = PAGE_SIZE;
> > + }
> > +
> > + /*
> > + * Batch compress @nr_batch_pages. If IAA is the compressor, the
> > + * hardware will compress @nr_batch_pages in parallel.
> > + */
>
> Please do not specifically mention IAA in zswap.c, as batching could be
> supported in the future by other compressors.

Ok.
>
> > + ret = crypto_acomp_batch_compress(
> > + acomp_ctx->reqs,
> > + NULL,
> > + pages,
> > + acomp_ctx->buffers,
> > + dlens,
> > + comp_errors,
> > + nr_batch_pages);
>
> Does crypto_acomp_batch_compress() not require calling
> crypto_wait_req()?

It actually doesn't. If the crypto_wait parameter is NULL, the API requires
the driver to provide a way to process request completions asynchronously,
as described in patch 2, which adds the crypto batching API.
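
For comparison, a sketch using the field names in this series: the
existing single-page path waits explicitly,

	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->reqs[0]),
				   &acomp_ctx->wait);

whereas passing a NULL crypto_wait to the batching API means the driver
resolves all requests in the batch before returning, so no separate wait
is needed:

	ret = crypto_acomp_batch_compress(acomp_ctx->reqs, NULL, pages,
					  acomp_ctx->buffers, dlens,
					  comp_errors, nr_batch_pages);
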
>
> > +
> > + if (ret) {
> > + /*
> > + * All batch pages were successfully compressed.
> > + * Store the pages in zpool.
> > + */
> > + struct zpool *zpool = pool->zpool;
> > + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
> > +
> > + if (zpool_malloc_support_movable(zpool))
> > + gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
> > +
> > + for (i = 0; i < nr_batch_pages; ++i) {
> > + unsigned long handle;
> > + char *buf;
> > + int err;
> > +
> > + err = zpool_malloc(zpool, dlens[i], gfp, &handle);
> > +
> > + if (err) {
> > + if (err == -ENOSPC)
> > + zswap_reject_compress_poor++;
> > + else
> > + zswap_reject_alloc_fail++;
> > +
> > + ret = false;
> > + break;
> > + }
> > +
> > + buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
> > + memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
> > + zpool_unmap_handle(zpool, handle);
> > +
> > + entries[i]->handle = handle;
> > + entries[i]->length = dlens[i];
> > + }
> > + } else {
> > + /* Some batch pages had compression errors. */
> > + for (i = 0; i < nr_batch_pages; ++i) {
> > + if (comp_errors[i]) {
> > + if (comp_errors[i] == -ENOSPC)
> > + zswap_reject_compress_poor++;
> > + else
> > + zswap_reject_compress_fail++;
> > + }
> > + }
> > + }
>
> This function is awfully close to zswap_compress(). It's essentially a
> vectorized version and uses crypto_acomp_batch_compress() instead of
> crypto_acomp_compress().
>
> My questions are:
> - Can we use crypto_acomp_batch_compress() for the non-batched case as
> well to unify the code? Does it cause any regressions?
>
> - If we have to use different compressions APIs, can we at least reuse
> the rest of the code? We can abstract the compression call into a
> helper that chooses the appropriate API based on the batch size. The
> rest should be the same AFAICT.

All good ideas. Let me think about this some more, and gather some data.

Thanks,
Kanchana
>
> > +
> > + return ret;
> > +}
> > +
> > static bool zswap_compress_folio(struct folio *folio,
> > struct zswap_entry *entries[],
> > struct zswap_pool *pool)
> > {
> > long index, nr_pages = folio_nr_pages(folio);
> > + struct crypto_acomp_ctx *acomp_ctx;
> > + unsigned int batch_size;
> > + bool ret = true;
> >
> > - for (index = 0; index < nr_pages; ++index) {
> > - struct page *page = folio_page(folio, index);
> > + acomp_ctx = acomp_ctx_get_cpu_lock(pool);
> > + batch_size = acomp_ctx->nr_reqs;
> > +
> > + if ((batch_size > 1) && (nr_pages > 1)) {
> > + for (index = 0; index < nr_pages; index += batch_size) {
> > +
> > + if (!zswap_batch_compress(folio, index, batch_size,
> > + &entries[index], pool, acomp_ctx)) {
> > + ret = false;
> > + goto unlock_acomp_ctx;
> > + }
> > + }
> > + } else {
> > + for (index = 0; index < nr_pages; ++index) {
> > + struct page *page = folio_page(folio, index);
> >
> > - if (!zswap_compress(page, entries[index], pool))
> > - return false;
> > + if (!zswap_compress(page, entries[index], pool, acomp_ctx)) {
> > + ret = false;
> > + goto unlock_acomp_ctx;
> > + }
> > + }
> > }
> >
> > - return true;
> > +unlock_acomp_ctx:
> > + acomp_ctx_put_unlock(acomp_ctx);
> > + return ret;
> > }
> >
> > /*
> > --
> > 2.27.0
> >

> -----Original Message-----
> From: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Sent: Thursday, February 6, 2025 11:24 AM
> To: Yosry Ahmed <yosry.ahmed@linux.dev>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; nphamcs@gmail.com; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; ryan.roberts@arm.com; 21cnbao@gmail.com;
> akpm@linux-foundation.org; linux-crypto@vger.kernel.org;
> herbert@gondor.apana.org.au; davem@davemloft.net;
> clabbe@baylibre.com; ardb@kernel.org; ebiggers@google.com;
> surenb@google.com; Accardi, Kristen C <kristen.c.accardi@intel.com>;
> Feghali, Wajdi K <wajdi.k.feghali@intel.com>; Gopal, Vinodh
> <vinodh.gopal@intel.com>; Sridhar, Kanchana P
> <kanchana.p.sridhar@intel.com>
> Subject: RE: [PATCH v6 15/16] mm: zswap: Compress batching with Intel IAA
> in zswap_store() of large folios.
>
> [...]
>
> > This function is awfully close to zswap_compress(). It's essentially a
> > vectorized version that uses crypto_acomp_batch_compress() instead of
> > crypto_acomp_compress().
> >
> > My questions are:
> > - Can we use crypto_acomp_batch_compress() for the non-batched case as
> >   well to unify the code? Does it cause any regressions?
> >
> > - If we have to use different compression APIs, can we at least reuse
> >   the rest of the code? We can abstract the compression call into a
> >   helper that chooses the appropriate API based on the batch size. The
> >   rest should be the same AFAICT.
>
> All good ideas. Let me think about this some more, and gather some data.

Based on Herbert's suggestion, in v7 I have separated out the compress
batching API into a new zswap_batch_compress() that uses request chaining.
zswap_compress() remains in its current form; the only difference is that
the mutex lock is now taken once, in zswap_store_folio(), which decides
whether or not to batch.

I will provide more details in response to your comments about exploring
smaller batches to address the zstd regression seen in v5 and fixed in v6.
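
Roughly, the v7 flow is the following (a sketch; the posted patches may
differ in detail):

	/* zswap_store_folio(): take the per-CPU acomp_ctx lock once. */
	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
	batch_size = acomp_ctx->nr_reqs;

	for (index = 0; index < nr_pages; index += batch_size) {
		if (batch_size > 1 && nr_pages > 1)
			/* Compress a batch using request chaining. */
			ret = zswap_batch_compress(folio, index, batch_size,
						   &entries[index], pool,
						   acomp_ctx);
		else
			ret = zswap_compress(folio_page(folio, index),
					     entries[index], pool, acomp_ctx);
		if (!ret)
			break;
	}

	acomp_ctx_put_unlock(acomp_ctx);
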
Thanks,
Kanchana