Move the hash table growth check and work scheduling outside the
rht lock to prevent a possible circular locking dependency.
The original implementation could trigger a lockdep warning due to
a potential deadlock scenario involving nested locks between
rhashtable bucket, rq lock, and dsq lock. By relocating the
growth check and work scheduling after releasing the rht lock, we break
this potential deadlock chain.
This change expands the flexibility of rhashtable by removing
restrictive locking that previously limited its use in scheduler
and workqueue contexts.
It is important to note that this now calls rht_grow_above_75(), which
reads from struct rhashtable without holding the lock. If that is a
problem, we can keep the check under the lock and only schedule the
work after the lock is released.
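For illustration only, that alternative would look roughly like the
sketch below; need_grow is just a hypothetical local flag, and the
calls involved are the same ones visible in the diff:

        bool need_grow;

        /* still under the bucket lock */
        atomic_inc(&ht->nelems);
        need_grow = rht_grow_above_75(ht, tbl);

        rht_unlock(tbl, bkt, flags);

        /* only the work scheduling is deferred past the unlock */
        if (need_grow)
                schedule_work(&ht->run_work);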
Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Breno Leitao <leitao@debian.org>
---
lib/rhashtable.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6c902639728b767cc3ee42c61256d2e9618e6ce7..5a27ccd72db9a25d92d1ed2f8d519afcfc672afe 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -585,9 +585,6 @@ static struct bucket_table *rhashtable_insert_one(
 	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
-	if (rht_grow_above_75(ht, tbl))
-		schedule_work(&ht->run_work);
-
 	return NULL;
 }
 
@@ -624,6 +621,9 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 				data = ERR_CAST(new_tbl);
 
 			rht_unlock(tbl, bkt, flags);
+			if (rht_grow_above_75(ht, tbl))
+				schedule_work(&ht->run_work);
+
 		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
---
base-commit: 0a31ca318eea4da46e5f495c79ccc4442c92f4dc
change-id: 20241128-scx_lockdep-3fa87553609d
Best regards,
--
Breno Leitao <leitao@debian.org>
Hi,
On Thu, Nov 28, 2024 at 04:16:25AM -0800, Breno Leitao wrote:
> Move the hash table growth check and work scheduling outside the
> rht lock to prevent a possible circular locking dependency.
>
> The original implementation could trigger a lockdep warning due to
> a potential deadlock scenario involving nested locks between
> rhashtable bucket, rq lock, and dsq lock. By relocating the
> growth check and work scheduling after releasing the rht lock, we break
> this potential deadlock chain.
>
> This change expands the flexibility of rhashtable by removing
> restrictive locking that previously limited its use in scheduler
> and workqueue contexts.
>
> It is important to note that this now calls rht_grow_above_75(), which
> reads from struct rhashtable without holding the lock. If that is a
> problem, we can keep the check under the lock and only schedule the
> work after the lock is released.
>
> Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
> Suggested-by: Tejun Heo <tj@kernel.org>
> Signed-off-by: Breno Leitao <leitao@debian.org>
With this patch in linux-next, I get some unit test errors.
[ 3.800185] # Subtest: hw_breakpoint
[ 3.800469] # module: hw_breakpoint_test
[ 3.800718] 1..9
[ 3.810825] # test_one_cpu: pass:1 fail:0 skip:0 total:1
[ 3.810950] ok 1 test_one_cpu
[ 3.814941] # test_many_cpus: pass:1 fail:0 skip:0 total:1
[ 3.815092] ok 2 test_many_cpus
[ 3.822977] # test_one_task_on_all_cpus: pass:1 fail:0 skip:0 total:1
[ 3.823100] ok 3 test_one_task_on_all_cpus
[ 3.829071] # test_two_tasks_on_all_cpus: pass:1 fail:0 skip:0 total:1
[ 3.829199] ok 4 test_two_tasks_on_all_cpus
[ 3.830914] # test_one_task_on_one_cpu: ASSERTION FAILED at kernel/events/hw_breakpoint_test.c:70
[ 3.830914] Expected IS_ERR(bp) to be false, but is true
[ 3.832572] # test_one_task_on_one_cpu: EXPECTATION FAILED at kernel/events/hw_breakpoint_test.c:320
[ 3.832572] Expected hw_breakpoint_is_used() to be false, but is true
[ 3.833002] # test_one_task_on_one_cpu: pass:0 fail:1 skip:0 total:1
[ 3.833071] not ok 5 test_one_task_on_one_cpu
[ 3.834994] # test_one_task_mixed: EXPECTATION FAILED at kernel/events/hw_breakpoint_test.c:320
[ 3.834994] Expected hw_breakpoint_is_used() to be false, but is true
[ 3.835583] # test_one_task_mixed: pass:0 fail:1 skip:0 total:1
[ 3.835638] not ok 6 test_one_task_mixed
[ 3.837131] # test_two_tasks_on_one_cpu: EXPECTATION FAILED at kernel/events/hw_breakpoint_test.c:320
[ 3.837131] Expected hw_breakpoint_is_used() to be false, but is true
[ 3.837827] # test_two_tasks_on_one_cpu: pass:0 fail:1 skip:0 total:1
[ 3.837882] not ok 7 test_two_tasks_on_one_cpu
[ 3.839868] # test_two_tasks_on_one_all_cpus: EXPECTATION FAILED at kernel/events/hw_breakpoint_test.c:320
[ 3.839868] Expected hw_breakpoint_is_used() to be false, but is true
[ 3.840294] # test_two_tasks_on_one_all_cpus: pass:0 fail:1 skip:0 total:1
[ 3.840538] not ok 8 test_two_tasks_on_one_all_cpus
[ 3.843599] # test_task_on_all_and_one_cpu: EXPECTATION FAILED at kernel/events/hw_breakpoint_test.c:320
[ 3.843599] Expected hw_breakpoint_is_used() to be false, but is true
[ 3.844163] # test_task_on_all_and_one_cpu: pass:0 fail:1 skip:0 total:1
[ 3.844215] not ok 9 test_task_on_all_and_one_cpu
[ 3.844453] # hw_breakpoint: pass:4 fail:5 skip:0 total:9
[ 3.844610] # Totals: pass:4 fail:5 skip:0 total:9
[ 3.844797] not ok 1 hw_breakpoint
Sometimes I also see:
[ 12.579842] # Subtest: Handshake API tests
[ 12.579971] 1..11
[ 12.580052] KTAP version 1
[ 12.580410] # Subtest: req_alloc API fuzzing
[ 12.582206] ok 1 handshake_req_alloc NULL proto
[ 12.583541] ok 2 handshake_req_alloc CLASS_NONE
[ 12.585419] ok 3 handshake_req_alloc CLASS_MAX
[ 12.587291] ok 4 handshake_req_alloc no callbacks
[ 12.589239] ok 5 handshake_req_alloc no done callback
[ 12.590758] ok 6 handshake_req_alloc excessive privsize
[ 12.592642] ok 7 handshake_req_alloc all good
[ 12.592802] # req_alloc API fuzzing: pass:7 fail:0 skip:0 total:7
[ 12.593185] ok 1 req_alloc API fuzzing
[ 12.597371] # req_submit NULL req arg: pass:1 fail:0 skip:0 total:1
[ 12.597501] ok 2 req_submit NULL req arg
[ 12.599208] # req_submit NULL sock arg: pass:1 fail:0 skip:0 total:1
[ 12.599338] ok 3 req_submit NULL sock arg
[ 12.601549] # req_submit NULL sock->file: pass:1 fail:0 skip:0 total:1
[ 12.601680] ok 4 req_submit NULL sock->file
[ 12.605334] # req_lookup works: pass:1 fail:0 skip:0 total:1
[ 12.605469] ok 5 req_lookup works
[ 12.609596] # req_submit max pending: pass:1 fail:0 skip:0 total:1
[ 12.609730] ok 6 req_submit max pending
[ 12.613796] # req_submit multiple: pass:1 fail:0 skip:0 total:1
[ 12.614250] ok 7 req_submit multiple
[ 12.616395] # req_cancel before accept: ASSERTION FAILED at net/handshake/handshake-test.c:333
[ 12.616395] Expected err == 0, but
[ 12.616395] err == -16 (0xfffffffffffffff0)
[ 12.618061] # req_cancel before accept: pass:0 fail:1 skip:0 total:1
[ 12.618135] not ok 8 req_cancel before accept
[ 12.619437] # req_cancel after accept: ASSERTION FAILED at net/handshake/handshake-test.c:369
[ 12.619437] Expected err == 0, but
[ 12.619437] err == -16 (0xfffffffffffffff0)
[ 12.621055] # req_cancel after accept: pass:0 fail:1 skip:0 total:1
[ 12.621119] not ok 9 req_cancel after accept
[ 12.622342] # req_cancel after done: ASSERTION FAILED at net/handshake/handshake-test.c:411
[ 12.622342] Expected err == 0, but
[ 12.622342] err == -16 (0xfffffffffffffff0)
[ 12.623547] # req_cancel after done: pass:0 fail:1 skip:0 total:1
[ 12.623608] not ok 10 req_cancel after done
[ 12.625297] # req_destroy works: ASSERTION FAILED at net/handshake/handshake-test.c:469
[ 12.625297] Expected err == 0, but
[ 12.625297] err == -16 (0xfffffffffffffff0)
[ 12.626633] # req_destroy works: pass:0 fail:1 skip:0 total:1
[ 12.626696] not ok 11 req_destroy works
[ 12.626837] # Handshake API tests: pass:7 fail:4 skip:0 total:11
[ 12.627298] # Totals: pass:13 fail:4 skip:0 total:17
[ 12.627446] not ok 90 Handshake API tests
The log is from a test with x86_64, but other architectures are affected
as well. Reverting this patch fixes the problem.
Bisect log is attached for reference.
Guenter
---
# bad: [37136bf5c3a6f6b686d74f41837a6406bec6b7bc] Add linux-next specific files for 20250113
# good: [9d89551994a430b50c4fffcb1e617a057fa76e20] Linux 6.13-rc6
git bisect start 'HEAD' 'v6.13-rc6'
# good: [25dcaaf9b3bdaa117b8eb722ebde76ec9ed30038] Merge branch 'main' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
git bisect good 25dcaaf9b3bdaa117b8eb722ebde76ec9ed30038
# bad: [c6ab5ee56509953c3ee6647ac9f266a7c628f082] Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
git bisect bad c6ab5ee56509953c3ee6647ac9f266a7c628f082
# good: [39388d53c57be95eafb0ce1d81d0ec6bd2f6f42d] Merge tag 'cgroup-dmem-drm-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux into drm-next
git bisect good 39388d53c57be95eafb0ce1d81d0ec6bd2f6f42d
# bad: [0f8b2b2250abe043cef890caa378bebe5c4f5d88] Merge branch 'for-next' of https://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
git bisect bad 0f8b2b2250abe043cef890caa378bebe5c4f5d88
# good: [67fcb0469b17071890761d437bdf83d2e2d14575] Merge branch 'spi-nor/next' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git
git bisect good 67fcb0469b17071890761d437bdf83d2e2d14575
# bad: [7fd4a4e4c397fc315f8ebf0fb22e50526f66580a] Merge branch 'drm-next' of https://gitlab.freedesktop.org/agd5f/linux
git bisect bad 7fd4a4e4c397fc315f8ebf0fb22e50526f66580a
# good: [e2c4c6c10542ccfe4a0830bb6c9fd5b177b7bbb7] drm/amd/display: Initialize denominator defaults to 1
git bisect good e2c4c6c10542ccfe4a0830bb6c9fd5b177b7bbb7
# bad: [5b7981c1ca61ca7ad7162cfe95bf271d001d29ac] crypto: x86/aes-xts - use .irp when useful
git bisect bad 5b7981c1ca61ca7ad7162cfe95bf271d001d29ac
# good: [ce8fd0500b741b3669c246cc604f1f2343cdd6fd] crypto: qce - use __free() for a buffer that's always freed
git bisect good ce8fd0500b741b3669c246cc604f1f2343cdd6fd
# good: [5e252f490c1c2c989cdc2ca50744f30fbca356b4] crypto: tea - stop using cra_alignmask
git bisect good 5e252f490c1c2c989cdc2ca50744f30fbca356b4
# good: [f916e44487f56df4827069ff3a2070c0746dc511] crypto: keywrap - remove assignment of 0 to cra_alignmask
git bisect good f916e44487f56df4827069ff3a2070c0746dc511
# bad: [b9b894642fede191d50230d08608bd4f4f49f73d] crypto: lib/gf128mul - Remove some bbe deadcode
git bisect bad b9b894642fede191d50230d08608bd4f4f49f73d
# bad: [e1d3422c95f003eba241c176adfe593c33e8a8f6] rhashtable: Fix potential deadlock by moving schedule_work outside lock
git bisect bad e1d3422c95f003eba241c176adfe593c33e8a8f6
# first bad commit: [e1d3422c95f003eba241c176adfe593c33e8a8f6] rhashtable: Fix potential deadlock by moving schedule_work outside lock
On Mon, Jan 13, 2025 at 11:50:36AM -0800, Guenter Roeck wrote:
>
> With this patch in linux-next, I get some unit test errors.

Thanks! This patch should fix the problem.

https://patchwork.kernel.org/project/linux-crypto/patch/Z4XWx5X0doetOJni@gondor.apana.org.au/

--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Nov 28, 2024 at 04:16:25AM -0800, Breno Leitao wrote:
>
> diff --git a/lib/rhashtable.c b/lib/rhashtable.c
> index 6c902639728b767cc3ee42c61256d2e9618e6ce7..5a27ccd72db9a25d92d1ed2f8d519afcfc672afe 100644
> --- a/lib/rhashtable.c
> +++ b/lib/rhashtable.c
> @@ -585,9 +585,6 @@ static struct bucket_table *rhashtable_insert_one(
>  	rht_assign_locked(bkt, obj);
>  
>  	atomic_inc(&ht->nelems);
> -	if (rht_grow_above_75(ht, tbl))
> -		schedule_work(&ht->run_work);
> -
>  	return NULL;
>  }
>  
> @@ -624,6 +621,9 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
>  				data = ERR_CAST(new_tbl);
>  
>  			rht_unlock(tbl, bkt, flags);
> +			if (rht_grow_above_75(ht, tbl))
> +				schedule_work(&ht->run_work);
> +
The growth check should stay with the atomic_inc. Something like
this should work:
	if (PTR_ERR(data) == -ENOENT && !new_tbl) {
		atomic_inc(&ht->nelems);
		if (rht_grow_above_75(ht, tbl))
			schedule_work(&ht->run_work);
		break;
	}
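
For context, here is a rough sketch of how that fragment could slot into
the insert loop of rhashtable_try_insert(), pieced together from the
hunks quoted above; this is only an illustration, not necessarily the
exact patch that was finally applied:

	do {
		flags = rht_lock(tbl, bkt);
		/* the lookup/insert helpers run here under the bucket
		 * lock and produce data and new_tbl; in this variant
		 * they no longer bump nelems themselves */
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		rht_unlock(tbl, bkt, flags);

		/* the growth check stays next to the element count
		 * update, but the work is only scheduled after the
		 * bucket lock has been dropped */
		if (PTR_ERR(data) == -ENOENT && !new_tbl) {
			atomic_inc(&ht->nelems);
			if (rht_grow_above_75(ht, tbl))
				schedule_work(&ht->run_work);
			break;
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

The point of this placement is that rht_grow_above_75() still sees the
table state right after the atomic_inc(), while schedule_work() no
longer runs under the bucket lock.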
Could you please resend this via linux-crypto?
Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Dec 12, 2024 at 08:33:31PM +0800, Herbert Xu wrote:
>
> The growth check should stay with the atomic_inc. Something like
> this should work:

OK I've applied your patch with the atomic_inc move.

Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Sat, Dec 21, 2024 at 05:06:55PM +0800, Herbert Xu wrote:
> On Thu, Dec 12, 2024 at 08:33:31PM +0800, Herbert Xu wrote:
> >
> > The growth check should stay with the atomic_inc. Something like
> > this should work:
>
> OK I've applied your patch with the atomic_inc move.

Sorry, I was on vacation, and I am back now. Let me know if you need
anything further.

Thanks for fixing it,
--breno
On Thu, Nov 28, 2024 at 04:16:25AM -0800, Breno Leitao wrote:
> Move the hash table growth check and work scheduling outside the
> rht lock to prevent a possible circular locking dependency.
>
> The original implementation could trigger a lockdep warning due to
> a potential deadlock scenario involving nested locks between
> rhashtable bucket, rq lock, and dsq lock. By relocating the
> growth check and work scheduling after releasing the rht lock, we break
> this potential deadlock chain.
>
> This change expands the flexibility of rhashtable by removing
> restrictive locking that previously limited its use in scheduler
> and workqueue contexts.
>
> It is important to note that this now calls rht_grow_above_75(), which
> reads from struct rhashtable without holding the lock. If that is a
> problem, we can keep the check under the lock and only schedule the
> work after the lock is released.
>
> Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
> Suggested-by: Tejun Heo <tj@kernel.org>
> Signed-off-by: Breno Leitao <leitao@debian.org>
Acked-by: Tejun Heo <tj@kernel.org>
This solves a possible deadlock for sched_ext and makes rhashtable more
useful and I don't see any downsides.
Andrew, can you please pick up this one?
Thanks.
--
tejun
On Tue, Dec 03, 2024 at 10:16:24AM -1000, Tejun Heo wrote:
>
> This solves a possible deadlock for sched_ext and makes rhashtable more
> useful and I don't see any downsides.
>
> Andrew, can you please pick up this one?

I will be taking this through my tree once I've reviewed fully.

Thanks for your patience.
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
On Thu, Nov 28, 2024 at 04:16:25AM -0800, Breno Leitao wrote:
> Move the hash table growth check and work scheduling outside the
> rht lock to prevent a possible circular locking dependency.
>
> The original implementation could trigger a lockdep warning due to
> a potential deadlock scenario involving nested locks between
> rhashtable bucket, rq lock, and dsq lock. By relocating the
> growth check and work scheduling after releasing the rht lock, we break
> this potential deadlock chain.
>
> This change expands the flexibility of rhashtable by removing
> restrictive locking that previously limited its use in scheduler
> and workqueue contexts.

Could you please explain the deadlock? Is the workqueue system actually
using rhashtable?

Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Hello Herbert,
On Fri, Nov 29, 2024 at 01:47:42PM +0800, Herbert Xu wrote:
> On Thu, Nov 28, 2024 at 04:16:25AM -0800, Breno Leitao wrote:
> > Move the hash table growth check and work scheduling outside the
> > rht lock to prevent a possible circular locking dependency.
> >
> > The original implementation could trigger a lockdep warning due to
> > a potential deadlock scenario involving nested locks between
> > rhashtable bucket, rq lock, and dsq lock. By relocating the
> > growth check and work scheduling after releasing the rht lock, we break
> > this potential deadlock chain.
> >
> > This change expands the flexibility of rhashtable by removing
> > restrictive locking that previously limited its use in scheduler
> > and workqueue contexts.
>
> Could you please explain the deadlock?
I understand that there is a locking order inversion here:
A possible code flow can hold the rhashtable bucket lock and then take
the dsq->lock, as shown in the following snippet:
Chain exists of:
rhashtable_bucket --> &rq->__lock --> &dsq->lock
The same is true when sched_ext holds the dsq->lock and tries to take
the rhashtable bucket lock.
This can be seen in the following snippets:
rht_lock+0x69/0xd0
destroy_dsq+0x22d/0x790
scx_ops_disable_workfn+0x9d2/0xaf0
> Is the workqueue system actually using rhashtable?
It seems so when using the sched_ext scheduler class. For instance,
lockdep caught it in scx_ops_disable_workfn():
rht_lock+0x69/0xd0
destroy_dsq+0x22d/0x790
scx_ops_disable_workfn+0x9d2/0xaf0
kthread_worker_fn+0x137/0x350
This is the full lockdep splat, in case it helps. Sorry it is not
decoded, but it should give you the code flow.
======================================================
WARNING: possible circular locking dependency detected
hardirqs last enabled at (2088145): [<ffffffff822ab674>] _raw_spin_unlock_irq+0x24/0x50
hardirqs last disabled at (2088144): [<ffffffff822ab4bf>] _raw_spin_lock_irq+0x2f/0x80
------------------------------------------------------
softirqs last enabled at (2088116): [<ffffffff810f7294>] __irq_exit_rcu+0x74/0x100
sched_ext_ops_h/10451 is trying to acquire lock:
softirqs last disabled at (2088111): [<ffffffff810f7294>] __irq_exit_rcu+0x74/0x100
ffff888288059038 (rhashtable_bucket){....}-{0:0}, at: rht_lock+0x51/0xd0
but task is already holding lock:
ffff888470645698 (&dsq->lock){-.-.}-{2:2}, at: destroy_dsq+0xaf/0x790
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #4 (&dsq->lock){-.-.}-{2:2}:
_raw_spin_lock+0x2f/0x40
dispatch_enqueue+0x7c/0x3e0
enqueue_task_scx.llvm.3416789782249720787+0x1ae/0x250
sched_enq_and_set_task+0x5f/0xb0
bpf_scx_reg+0xf21/0x11b0
bpf_struct_ops_link_create+0xec/0x160
__sys_bpf+0x34d/0x3b0
__x64_sys_bpf+0x18/0x20
do_syscall_64+0x7e/0x150
entry_SYSCALL_64_after_hwframe+0x4b/0x53
-> #3 (&rq->__lock){-.-.}-{2:2}:
_raw_spin_lock_nested+0x32/0x40
raw_spin_rq_lock_nested+0x20/0x30
task_fork_fair.llvm.5382994275699419189+0x3b/0x110
sched_cgroup_fork+0xe3/0x100
copy_process+0xc3c/0x14a0
kernel_clone+0x90/0x360
user_mode_thread+0xbc/0xe0
rest_init+0x1f/0x1f0
start_kernel+0x41b/0x470
x86_64_start_reservations+0x26/0x30
x86_64_start_kernel+0x9b/0xa0
common_startup_64+0x13e/0x140
-> #2 (&p->pi_lock){-.-.}-{2:2}:
_raw_spin_lock_irqsave+0x5a/0x90
try_to_wake_up+0x58/0x730
create_worker+0x1d6/0x240
workqueue_init+0x2c0/0x390
kernel_init_freeable+0x147/0x200
kernel_init+0x16/0x1c0
ret_from_fork+0x2f/0x40
ret_from_fork_asm+0x11/0x20
-> #1 (&pool->lock){-.-.}-{2:2}:
_raw_spin_lock+0x2f/0x40
__queue_work+0x24b/0x610
queue_work_on+0xa5/0xf0
rhashtable_insert_slow+0x524/0x970
__xdp_reg_mem_model+0x181/0x240
xdp_rxq_info_reg_mem_model+0x19/0xf0
bnxt_alloc_mem+0x1178/0x1c80
__bnxt_open_nic+0x1bb/0xe20
bnxt_open_nic+0x26/0x60
ethtool_set_channels+0x1b7/0x1f0
dev_ethtool+0x555/0x740
dev_ioctl+0x2ac/0x3f0
sock_do_ioctl+0x111/0x180
sock_ioctl+0x1fb/0x2e0
__se_sys_ioctl+0x72/0xc0
do_syscall_64+0x7e/0x150
entry_SYSCALL_64_after_hwframe+0x4b/0x53
-> #0 (rhashtable_bucket){....}-{0:0}:
__lock_acquire+0x1742/0x3470
lock_acquire+0xf0/0x290
rht_lock+0x69/0xd0
destroy_dsq+0x22d/0x790
scx_ops_disable_workfn+0x9d2/0xaf0
kthread_worker_fn+0x137/0x350
kthread+0x102/0x120
ret_from_fork+0x2f/0x40
ret_from_fork_asm+0x11/0x20
other info that might help us debug this:
Chain exists of:
rhashtable_bucket --> &rq->__lock --> &dsq->lock
Possible unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(&dsq->lock);
                               lock(&rq->__lock);
                               lock(&dsq->lock);
  lock(rhashtable_bucket);
*** DEADLOCK ***
5 locks held by sched_ext_ops_h/10451:
#0: ffffffff83695ad0 (scx_ops_enable_mutex){+.+.}-{3:3}, at: scx_ops_disable_workfn+0x111/0xaf0
#1: ffffffff83da13d8 (rcu_read_lock){....}-{1:2}, at: rhashtable_walk_start_check+0x1f/0x3e0
#2: ffffffff83da13d8 (rcu_read_lock){....}-{1:2}, at: destroy_dsq+0x30/0x790
#3: ffff888470645698 (&dsq->lock){-.-.}-{2:2}, at: destroy_dsq+0xaf/0x790
#4: ffffffff83da13d8 (rcu_read_lock){....}-{1:2}, at: destroy_dsq+0x30/0x790