This patch merely moves zswap_cpu_comp_prepare() and
zswap_cpu_comp_dead() to be in the "pool functions" section because
these functions are invoked upon pool creation/deletion.
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
---
mm/zswap.c | 188 ++++++++++++++++++++++++++---------------------------
1 file changed, 94 insertions(+), 94 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index 3c0fd8a137182..3538ecaed5e16 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -248,6 +248,100 @@ static inline struct xarray *swap_zswap_tree(swp_entry_t swp)
 **********************************/
 static void __zswap_pool_empty(struct percpu_ref *ref);
 
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+{
+        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+        struct crypto_acomp *acomp = NULL;
+        struct acomp_req *req = NULL;
+        u8 *buffer = NULL;
+        int ret;
+
+        buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+        if (!buffer) {
+                ret = -ENOMEM;
+                goto fail;
+        }
+
+        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
+        if (IS_ERR(acomp)) {
+                pr_err("could not alloc crypto acomp %s : %ld\n",
+                       pool->tfm_name, PTR_ERR(acomp));
+                ret = PTR_ERR(acomp);
+                goto fail;
+        }
+
+        req = acomp_request_alloc(acomp);
+        if (!req) {
+                pr_err("could not alloc crypto acomp_request %s\n",
+                       pool->tfm_name);
+                ret = -ENOMEM;
+                goto fail;
+        }
+
+        /*
+         * Only hold the mutex after completing allocations, otherwise we may
+         * recurse into zswap through reclaim and attempt to hold the mutex
+         * again resulting in a deadlock.
+         */
+        mutex_lock(&acomp_ctx->mutex);
+        crypto_init_wait(&acomp_ctx->wait);
+
+        /*
+         * if the backend of acomp is async zip, crypto_req_done() will wakeup
+         * crypto_wait_req(); if the backend of acomp is scomp, the callback
+         * won't be called, crypto_wait_req() will return without blocking.
+         */
+        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                   crypto_req_done, &acomp_ctx->wait);
+
+        acomp_ctx->buffer = buffer;
+        acomp_ctx->acomp = acomp;
+        acomp_ctx->is_sleepable = acomp_is_async(acomp);
+        acomp_ctx->req = req;
+        mutex_unlock(&acomp_ctx->mutex);
+        return 0;
+
+fail:
+        if (acomp)
+                crypto_free_acomp(acomp);
+        kfree(buffer);
+        return ret;
+}
+
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
+{
+        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+        struct acomp_req *req;
+        struct crypto_acomp *acomp;
+        u8 *buffer;
+
+        if (IS_ERR_OR_NULL(acomp_ctx))
+                return 0;
+
+        mutex_lock(&acomp_ctx->mutex);
+        req = acomp_ctx->req;
+        acomp = acomp_ctx->acomp;
+        buffer = acomp_ctx->buffer;
+        acomp_ctx->req = NULL;
+        acomp_ctx->acomp = NULL;
+        acomp_ctx->buffer = NULL;
+        mutex_unlock(&acomp_ctx->mutex);
+
+        /*
+         * Do the actual freeing after releasing the mutex to avoid subtle
+         * locking dependencies causing deadlocks.
+         */
+        if (!IS_ERR_OR_NULL(req))
+                acomp_request_free(req);
+        if (!IS_ERR_OR_NULL(acomp))
+                crypto_free_acomp(acomp);
+        kfree(buffer);
+
+        return 0;
+}
+
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
         struct zswap_pool *pool;
@@ -818,100 +912,6 @@ static void zswap_entry_free(struct zswap_entry *entry)
 /*********************************
 * compressed storage functions
 **********************************/
-static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
-{
-        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-        struct crypto_acomp *acomp = NULL;
-        struct acomp_req *req = NULL;
-        u8 *buffer = NULL;
-        int ret;
-
-        buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-        if (!buffer) {
-                ret = -ENOMEM;
-                goto fail;
-        }
-
-        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
-        if (IS_ERR(acomp)) {
-                pr_err("could not alloc crypto acomp %s : %ld\n",
-                       pool->tfm_name, PTR_ERR(acomp));
-                ret = PTR_ERR(acomp);
-                goto fail;
-        }
-
-        req = acomp_request_alloc(acomp);
-        if (!req) {
-                pr_err("could not alloc crypto acomp_request %s\n",
-                       pool->tfm_name);
-                ret = -ENOMEM;
-                goto fail;
-        }
-
-        /*
-         * Only hold the mutex after completing allocations, otherwise we may
-         * recurse into zswap through reclaim and attempt to hold the mutex
-         * again resulting in a deadlock.
-         */
-        mutex_lock(&acomp_ctx->mutex);
-        crypto_init_wait(&acomp_ctx->wait);
-
-        /*
-         * if the backend of acomp is async zip, crypto_req_done() will wakeup
-         * crypto_wait_req(); if the backend of acomp is scomp, the callback
-         * won't be called, crypto_wait_req() will return without blocking.
-         */
-        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                   crypto_req_done, &acomp_ctx->wait);
-
-        acomp_ctx->buffer = buffer;
-        acomp_ctx->acomp = acomp;
-        acomp_ctx->is_sleepable = acomp_is_async(acomp);
-        acomp_ctx->req = req;
-        mutex_unlock(&acomp_ctx->mutex);
-        return 0;
-
-fail:
-        if (acomp)
-                crypto_free_acomp(acomp);
-        kfree(buffer);
-        return ret;
-}
-
-static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
-{
-        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-        struct acomp_req *req;
-        struct crypto_acomp *acomp;
-        u8 *buffer;
-
-        if (IS_ERR_OR_NULL(acomp_ctx))
-                return 0;
-
-        mutex_lock(&acomp_ctx->mutex);
-        req = acomp_ctx->req;
-        acomp = acomp_ctx->acomp;
-        buffer = acomp_ctx->buffer;
-        acomp_ctx->req = NULL;
-        acomp_ctx->acomp = NULL;
-        acomp_ctx->buffer = NULL;
-        mutex_unlock(&acomp_ctx->mutex);
-
-        /*
-         * Do the actual freeing after releasing the mutex to avoid subtle
-         * locking dependencies causing deadlocks.
-         */
-        if (!IS_ERR_OR_NULL(req))
-                acomp_request_free(req);
-        if (!IS_ERR_OR_NULL(acomp))
-                crypto_free_acomp(acomp);
-        kfree(buffer);
-
-        return 0;
-}
-
 static struct crypto_acomp_ctx *acomp_ctx_get_cpu_lock(struct zswap_pool *pool)
 {
         struct crypto_acomp_ctx *acomp_ctx;
--
2.27.0
On Thu, Jul 3, 2025 at 9:23 PM Kanchana P Sridhar
<kanchana.p.sridhar@intel.com> wrote:
>
> This patch merely moves zswap_cpu_comp_prepare() and
> zswap_cpu_comp_dead() to be in the "pool functions" section because
> these functions are invoked upon pool creation/deletion.

Hmm idk, "compressed storage" section seems fitting for
zswap_cpu_comp_prepare() and zswap_cpu_comp_dead().

Is this patch necessary?
> -----Original Message-----
> From: Nhat Pham <nphamcs@gmail.com>
> Sent: Friday, July 4, 2025 11:39 AM
> To: Sridhar, Kanchana P <kanchana.p.sridhar@intel.com>
> Cc: linux-kernel@vger.kernel.org; linux-mm@kvack.org;
> hannes@cmpxchg.org; yosry.ahmed@linux.dev; chengming.zhou@linux.dev;
> usamaarif642@gmail.com; ryan.roberts@arm.com; 21cnbao@gmail.com;
> ying.huang@linux.alibaba.com; akpm@linux-foundation.org;
> senozhatsky@chromium.org; linux-crypto@vger.kernel.org;
> herbert@gondor.apana.org.au; davem@davemloft.net;
> clabbe@baylibre.com; ardb@kernel.org; ebiggers@google.com;
> surenb@google.com; Accardi, Kristen C <kristen.c.accardi@intel.com>;
> Gomes, Vinicius <vinicius.gomes@intel.com>; Feghali, Wajdi K
> <wajdi.k.feghali@intel.com>; Gopal, Vinodh <vinodh.gopal@intel.com>
> Subject: Re: [PATCH v10 20/25] mm: zswap: Move the CPU hotplug
> procedures under "pool functions".
>
> On Thu, Jul 3, 2025 at 9:23 PM Kanchana P Sridhar
> <kanchana.p.sridhar@intel.com> wrote:
> >
> > This patch merely moves zswap_cpu_comp_prepare() and
> > zswap_cpu_comp_dead() to be in the "pool functions" section because
> > these functions are invoked upon pool creation/deletion.
>
> Hmm idk, "compressed storage" section seems fitting for
> zswap_cpu_comp_prepare() and zswap_cpu_comp_dead().
>
> Is this patch necessary?

Not too sure. I was moving it to be in "pool functions" because the next
patch #21 makes the association between the acomp_ctx resources' lifetime
to be from pool creation to deletion, and also does not register a
teardown callback. I can move the zswap_cpu_comp_prepare() and
modifications back to "compressed storage" if you think that's where it
belongs.

Thanks,
Kanchana
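[ Context: the commit message's point that these callbacks are "invoked
upon pool creation/deletion" comes from the CPU hotplug multi-instance
wiring already in mm/zswap.c. A simplified sketch of that wiring,
paraphrased from the existing code (error handling and surrounding code
elided; not part of this patch):

        /* zswap_setup(): register the callback pair for the multi state. */
        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
                                      zswap_cpu_comp_dead);

        /* zswap_pool_create(): runs zswap_cpu_comp_prepare() on each online CPU. */
        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);

        /* zswap_pool_destroy(): runs zswap_cpu_comp_dead() on each online CPU. */
        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
]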