cryptd_queue::cpu_queue is a per-CPU variable and relies on disabled
BH for its locking. On PREEMPT_RT, local_bh_disable() does not provide
this per-CPU locking, so the data structure requires explicit locking.
Add a local_lock_t to struct cryptd_cpu_queue and use
local_lock_nested_bh() for locking. On !PREEMPT_RT this change adds
only lockdep coverage and does not alter the functional behaviour.
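
For illustration, the general shape of the pattern is roughly the
following (a minimal, hypothetical sketch with made-up names, not code
from this patch):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct demo_cpu_queue {
		local_lock_t bh_lock;	/* protects 'count' */
		unsigned int count;
	};

	static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queue) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	static void demo_enqueue(void)
	{
		struct demo_cpu_queue *q;

		local_bh_disable();
		/*
		 * A lockdep-only annotation on !PREEMPT_RT; a real
		 * per-CPU lock on PREEMPT_RT, where disabling BH alone
		 * no longer serialises access to per-CPU data.
		 */
		local_lock_nested_bh(&demo_queue.bh_lock);
		q = this_cpu_ptr(&demo_queue);
		q->count++;
		local_unlock_nested_bh(&demo_queue.bh_lock);
		local_bh_enable();
	}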
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: linux-crypto@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
crypto/cryptd.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 31d022d47f7a0..39c9e83a3a5b8 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 static struct workqueue_struct *cryptd_wq;
 
 struct cryptd_cpu_queue {
+	local_lock_t bh_lock;
 	struct crypto_queue queue;
 	struct work_struct work;
 };
@@ -110,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		local_lock_init(&cpu_queue->bh_lock);
 	}
 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 	return 0;
@@ -135,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_t *refcnt;
 
 	local_bh_disable();
+	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
@@ -151,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_inc(refcnt);
 
 out:
+	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
 	local_bh_enable();
 
 	return err;
@@ -169,8 +173,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
 	 */
 	local_bh_disable();
+	__local_lock_nested_bh(&cpu_queue->bh_lock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
+	__local_unlock_nested_bh(&cpu_queue->bh_lock);
 	local_bh_enable();
 
 	if (!req)
--
2.49.0
On 2025-05-14 13:07:50 [+0200], To linux-kernel@vger.kernel.org wrote:
> cryptd_queue::cpu_queue is a per-CPU variable and relies on disabled
> BH for its locking. On PREEMPT_RT, local_bh_disable() does not provide
> this per-CPU locking, so the data structure requires explicit locking.
>
> Add a local_lock_t to struct cryptd_cpu_queue and use
> local_lock_nested_bh() for locking. On !PREEMPT_RT this change adds
> only lockdep coverage and does not alter the functional behaviour.
>
> Cc: "David S. Miller" <davem@davemloft.net>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> Cc: linux-crypto@vger.kernel.org
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

This is meant as an example for #1 and should not be applied.

Sebastian
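
For context on why the change is lockdep-only for !PREEMPT_RT: the two
configurations of local_lock_nested_bh() behave roughly as below. This
is a simplified paraphrase of include/linux/local_lock_internal.h, not
the verbatim kernel macros:

	/*
	 * !PREEMPT_RT: local_lock_t carries only a lockdep map, so the
	 * "lock" compiles away entirely without CONFIG_DEBUG_LOCK_ALLOC.
	 */
	#define local_lock_nested_bh(lock)			\
		do {						\
			lockdep_assert_in_softirq();		\
			local_lock_acquire(this_cpu_ptr(lock));	\
		} while (0)

	/*
	 * PREEMPT_RT: local_lock_t wraps a real (rtmutex-based)
	 * spinlock, which serialises access to the per-CPU data while
	 * the critical section remains preemptible.
	 */
	#define local_lock_nested_bh(lock)			\
		spin_lock(this_cpu_ptr(lock))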