In lbmRead(), the I/O that wait_event() waits for can complete before the
waiter ever goes to sleep, and lbmIODone() sets lbmDONE before it clears
lbmREAD, so the wait condition becomes true prematurely and ends the wait.
wait_event() therefore returns before lbmREAD is cleared (lbmDONE was set
first), and the caller can release the lbuf while lbmIODone() is still
using it, triggering the use-after-free reported in [1].

Move the setting of the lbmDONE flag to after lbmREAD is cleared in
lbmIODone() to close this window and avoid the use-after-free reported
in [1].
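
For clarity, a minimal sketch of the window (heavily abbreviated, unrelated
code elided; the wait condition shown is assumed to be the one in lbmRead()
in current mainline jfs_logmgr.c):

	/* lbmRead(), simplified: submit the read and wait for it */
	submit_bio(bio);
	wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
	/* on return the caller may free bp, e.g. via lbmLogShutdown()
	 * on the lmLogInit() error path seen in the report below */

	/* lbmIODone() before this patch, simplified */
	LCACHE_LOCK(flags);		/* disable+lock */
	bp->l_flag |= lbmDONE;		/* wait condition above is already true */
	...
	if (bp->l_flag & lbmREAD) {
		bp->l_flag &= ~lbmREAD;
		LCACHE_UNLOCK(flags);	/* unlock+enable */
		/* if the waiter saw lbmDONE and the caller already freed
		 * bp, this wakeup touches freed memory */
		LCACHE_WAKEUP(&bp->l_ioevent);
		return;
	}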
[1]
BUG: KASAN: slab-use-after-free in rt_spin_lock+0x88/0x3e0 kernel/locking/spinlock_rt.c:56
Call Trace:
blk_update_request+0x57e/0xe60 block/blk-mq.c:1007
blk_mq_end_request+0x3e/0x70 block/blk-mq.c:1169
blk_complete_reqs block/blk-mq.c:1244 [inline]
blk_done_softirq+0x10a/0x160 block/blk-mq.c:1249
Allocated by task 6101:
lbmLogInit fs/jfs/jfs_logmgr.c:1821 [inline]
lmLogInit+0x3d0/0x19e0 fs/jfs/jfs_logmgr.c:1269
open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532
Freed by task 6101:
kfree+0x1bd/0x900 mm/slub.c:6876
lbmLogShutdown fs/jfs/jfs_logmgr.c:1864 [inline]
lmLogInit+0x1137/0x19e0 fs/jfs/jfs_logmgr.c:1415
open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532
Reported-by: syzbot+1d38eedcb25a3b5686a7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=1d38eedcb25a3b5686a7
Signed-off-by: Edward Adam Davis <eadavis@qq.com>
---
fs/jfs/jfs_logmgr.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b343c5ea1159..dda9ffa8eaf5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2180,8 +2180,6 @@ static void lbmIODone(struct bio *bio)
LCACHE_LOCK(flags); /* disable+lock */
- bp->l_flag |= lbmDONE;
-
if (bio->bi_status) {
bp->l_flag |= lbmERROR;
@@ -2196,12 +2194,10 @@ static void lbmIODone(struct bio *bio)
if (bp->l_flag & lbmREAD) {
bp->l_flag &= ~lbmREAD;
- LCACHE_UNLOCK(flags); /* unlock+enable */
-
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
- return;
+ goto out;
}
/*
@@ -2225,8 +2221,7 @@ static void lbmIODone(struct bio *bio)
if (bp->l_flag & lbmDIRECT) {
LCACHE_WAKEUP(&bp->l_ioevent);
- LCACHE_UNLOCK(flags);
- return;
+ goto out;
}
tail = log->wqueue;
@@ -2278,8 +2273,6 @@ static void lbmIODone(struct bio *bio)
* leave buffer for i/o initiator to dispose
*/
if (bp->l_flag & lbmSYNC) {
- LCACHE_UNLOCK(flags); /* unlock+enable */
-
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
}
@@ -2288,7 +2281,6 @@ static void lbmIODone(struct bio *bio)
* Group Commit pageout:
*/
else if (bp->l_flag & lbmGC) {
- LCACHE_UNLOCK(flags);
lmPostGC(bp);
}
@@ -2302,9 +2294,11 @@ static void lbmIODone(struct bio *bio)
assert(bp->l_flag & lbmRELEASE);
assert(bp->l_flag & lbmFREE);
lbmfree(bp);
-
- LCACHE_UNLOCK(flags); /* unlock+enable */
}
+
+out:
+ bp->l_flag |= lbmDONE;
+ LCACHE_UNLOCK(flags);
}
int jfsIOWait(void *arg)
--
2.43.0
syzbot ci has tested the following series
[v1] jfs: Extend the done of the window period
https://lore.kernel.org/all/tencent_2AC2ECAACC587B4E6C342D096F909424E90A@qq.com
* [PATCH] jfs: Extend the done of the window period
and found the following issue:
possible deadlock in lbmIODone
Full report is available here:
https://ci.syzbot.org/series/49387e77-608d-493c-9978-8d1e9ab79507
***
possible deadlock in lbmIODone
tree: torvalds
URL: https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
base: d358e5254674b70f34c847715ca509e46eb81e6f
arch: amd64
compiler: Debian clang version 20.1.8 (++20250708063551+0c9f909b7976-1~exp1~20250708183702.136), Debian LLD 20.1.8
config: https://ci.syzbot.org/builds/802e00bf-1926-4ea9-a853-4f01d10a4a6e/config
C repro: https://ci.syzbot.org/findings/784b824b-3582-4c98-a807-ff28792ecaac/c_repro
syz repro: https://ci.syzbot.org/findings/784b824b-3582-4c98-a807-ff28792ecaac/syz_repro
======================================================
WARNING: possible circular locking dependency detected
syzkaller #0 Not tainted
------------------------------------------------------
ksoftirqd/0/15 is trying to acquire lock:
ffff888112c1f9e8 (&(log)->gclock){..-.}-{3:3}, at: lmPostGC fs/jfs/jfs_logmgr.c:810 [inline]
ffff888112c1f9e8 (&(log)->gclock){..-.}-{3:3}, at: lbmIODone+0x681/0x17b0 fs/jfs/jfs_logmgr.c:2284
but task is already holding lock:
ffffffff8e396158 (jfsLCacheLock){..-.}-{3:3}, at: lbmIODone+0x92/0x17b0 fs/jfs/jfs_logmgr.c:2181
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #1 (jfsLCacheLock){..-.}-{3:3}:
__raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
_raw_spin_lock_irqsave+0xa7/0xf0 kernel/locking/spinlock.c:162
lbmWrite+0x115/0x490 fs/jfs/jfs_logmgr.c:2022
lmGCwrite fs/jfs/jfs_logmgr.c:-1 [inline]
lmGroupCommit+0x570/0xb30 fs/jfs/jfs_logmgr.c:687
txCommit+0x4940/0x5430 fs/jfs/jfs_txnmgr.c:1305
diNewIAG fs/jfs/jfs_imap.c:2592 [inline]
diAllocExt fs/jfs/jfs_imap.c:1905 [inline]
diAllocAG+0x1770/0x1df0 fs/jfs/jfs_imap.c:1669
diAlloc+0x1d5/0x1680 fs/jfs/jfs_imap.c:1590
ialloc+0x8c/0x8f0 fs/jfs/jfs_inode.c:56
jfs_mkdir+0x193/0xa70 fs/jfs/namei.c:225
vfs_mkdir+0x512/0x5b0 fs/namei.c:5130
do_mkdirat+0x276/0x4b0 fs/namei.c:5164
__do_sys_mkdirat fs/namei.c:5186 [inline]
__se_sys_mkdirat fs/namei.c:5184 [inline]
__x64_sys_mkdirat+0x87/0xa0 fs/namei.c:5184
do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
do_syscall_64+0xfa/0xf80 arch/x86/entry/syscall_64.c:94
entry_SYSCALL_64_after_hwframe+0x77/0x7f
-> #0 (&(log)->gclock){..-.}-{3:3}:
check_prev_add kernel/locking/lockdep.c:3165 [inline]
check_prevs_add kernel/locking/lockdep.c:3284 [inline]
validate_chain kernel/locking/lockdep.c:3908 [inline]
__lock_acquire+0x15a6/0x2cf0 kernel/locking/lockdep.c:5237
lock_acquire+0x117/0x340 kernel/locking/lockdep.c:5868
__raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
_raw_spin_lock_irqsave+0xa7/0xf0 kernel/locking/spinlock.c:162
lmPostGC fs/jfs/jfs_logmgr.c:810 [inline]
lbmIODone+0x681/0x17b0 fs/jfs/jfs_logmgr.c:2284
blk_update_request+0x57e/0xe60 block/blk-mq.c:1007
blk_mq_end_request+0x3e/0x70 block/blk-mq.c:1169
blk_complete_reqs block/blk-mq.c:1244 [inline]
blk_done_softirq+0x10a/0x160 block/blk-mq.c:1249
handle_softirqs+0x27d/0x850 kernel/softirq.c:622
run_ksoftirqd+0x9b/0x100 kernel/softirq.c:1063
smpboot_thread_fn+0x542/0xa60 kernel/smpboot.c:160
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
other info that might help us debug this:
Possible unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(jfsLCacheLock);
                               lock(&(log)->gclock);
                               lock(jfsLCacheLock);
  lock(&(log)->gclock);
*** DEADLOCK ***
1 lock held by ksoftirqd/0/15:
#0: ffffffff8e396158 (jfsLCacheLock){..-.}-{3:3}, at: lbmIODone+0x92/0x17b0 fs/jfs/jfs_logmgr.c:2181
stack backtrace:
CPU: 0 UID: 0 PID: 15 Comm: ksoftirqd/0 Not tainted syzkaller #0 PREEMPT(full)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Call Trace:
<TASK>
dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
print_circular_bug+0x2e2/0x300 kernel/locking/lockdep.c:2043
check_noncircular+0x12e/0x150 kernel/locking/lockdep.c:2175
check_prev_add kernel/locking/lockdep.c:3165 [inline]
check_prevs_add kernel/locking/lockdep.c:3284 [inline]
validate_chain kernel/locking/lockdep.c:3908 [inline]
__lock_acquire+0x15a6/0x2cf0 kernel/locking/lockdep.c:5237
lock_acquire+0x117/0x340 kernel/locking/lockdep.c:5868
__raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
_raw_spin_lock_irqsave+0xa7/0xf0 kernel/locking/spinlock.c:162
lmPostGC fs/jfs/jfs_logmgr.c:810 [inline]
lbmIODone+0x681/0x17b0 fs/jfs/jfs_logmgr.c:2284
blk_update_request+0x57e/0xe60 block/blk-mq.c:1007
blk_mq_end_request+0x3e/0x70 block/blk-mq.c:1169
blk_complete_reqs block/blk-mq.c:1244 [inline]
blk_done_softirq+0x10a/0x160 block/blk-mq.c:1249
handle_softirqs+0x27d/0x850 kernel/softirq.c:622
run_ksoftirqd+0x9b/0x100 kernel/softirq.c:1063
smpboot_thread_fn+0x542/0xa60 kernel/smpboot.c:160
kthread+0x711/0x8a0 kernel/kthread.c:463
ret_from_fork+0x599/0xb30 arch/x86/kernel/process.c:158
ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:246
</TASK>
***
If these findings have caused you to resend the series or submit a
separate fix, please add the following tag to your commit message:
Tested-by: syzbot@syzkaller.appspotmail.com
---
This report is generated by a bot. It may contain errors.
syzbot ci engineers can be reached at syzkaller@googlegroups.com.
In lbmRead(), the I/O that wait_event() waits for can complete before the
waiter ever goes to sleep, and lbmIODone() sets lbmDONE before it clears
lbmREAD, so the wait condition becomes true prematurely and ends the wait.
wait_event() therefore returns before lbmREAD is cleared (lbmDONE was set
first), and the caller can release the lbuf while lbmIODone() is still
using it, triggering the use-after-free reported in [1].

Move the setting of the lbmDONE flag to after lbmREAD is cleared in
lbmIODone() to close this window and avoid the use-after-free reported
in [1].
[1]
BUG: KASAN: slab-use-after-free in rt_spin_lock+0x88/0x3e0 kernel/locking/spinlock_rt.c:56
Call Trace:
blk_update_request+0x57e/0xe60 block/blk-mq.c:1007
blk_mq_end_request+0x3e/0x70 block/blk-mq.c:1169
blk_complete_reqs block/blk-mq.c:1244 [inline]
blk_done_softirq+0x10a/0x160 block/blk-mq.c:1249
Allocated by task 6101:
lbmLogInit fs/jfs/jfs_logmgr.c:1821 [inline]
lmLogInit+0x3d0/0x19e0 fs/jfs/jfs_logmgr.c:1269
open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532
Freed by task 6101:
kfree+0x1bd/0x900 mm/slub.c:6876
lbmLogShutdown fs/jfs/jfs_logmgr.c:1864 [inline]
lmLogInit+0x1137/0x19e0 fs/jfs/jfs_logmgr.c:1415
open_inline_log fs/jfs/jfs_logmgr.c:1175 [inline]
lmLogOpen+0x4e1/0xfa0 fs/jfs/jfs_logmgr.c:1069
jfs_mount_rw+0xe9/0x670 fs/jfs/jfs_mount.c:257
jfs_fill_super+0x754/0xd80 fs/jfs/super.c:532
Reported-by: syzbot+1d38eedcb25a3b5686a7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=1d38eedcb25a3b5686a7
Signed-off-by: Edward Adam Davis <eadavis@qq.com>
---
v1 -> v2: fix potential deadlock
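
The deadlock syzbot ci hit against v1 follows from the lock order already
established on the write path (log->gclock taken in lmGroupCommit(), then
jfsLCacheLock taken in lbmWrite(); see the dependency chain in the report
above): calling lmPostGC(), which takes gclock, while still holding the
LCACHE lock inverts that order. v2 therefore keeps dropping the LCACHE
lock around lmPostGC() and re-acquires it afterwards; a sketch of the
resulting group-commit branch:

	else if (bp->l_flag & lbmGC) {
		LCACHE_UNLOCK(flags);	/* don't hold jfsLCacheLock while
					 * lmPostGC() takes log->gclock */
		lmPostGC(bp);
		LCACHE_LOCK(flags);	/* re-take for the tail of lbmIODone(),
					 * which now sets lbmDONE under the lock */
	}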
fs/jfs/jfs_logmgr.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index b343c5ea1159..0db4bc9f2d6c 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2180,8 +2180,6 @@ static void lbmIODone(struct bio *bio)
LCACHE_LOCK(flags); /* disable+lock */
- bp->l_flag |= lbmDONE;
-
if (bio->bi_status) {
bp->l_flag |= lbmERROR;
@@ -2196,12 +2194,10 @@ static void lbmIODone(struct bio *bio)
if (bp->l_flag & lbmREAD) {
bp->l_flag &= ~lbmREAD;
- LCACHE_UNLOCK(flags); /* unlock+enable */
-
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
- return;
+ goto out;
}
/*
@@ -2225,8 +2221,7 @@ static void lbmIODone(struct bio *bio)
if (bp->l_flag & lbmDIRECT) {
LCACHE_WAKEUP(&bp->l_ioevent);
- LCACHE_UNLOCK(flags);
- return;
+ goto out;
}
tail = log->wqueue;
@@ -2278,8 +2273,6 @@ static void lbmIODone(struct bio *bio)
* leave buffer for i/o initiator to dispose
*/
if (bp->l_flag & lbmSYNC) {
- LCACHE_UNLOCK(flags); /* unlock+enable */
-
/* wakeup I/O initiator */
LCACHE_WAKEUP(&bp->l_ioevent);
}
@@ -2290,6 +2283,7 @@ static void lbmIODone(struct bio *bio)
else if (bp->l_flag & lbmGC) {
LCACHE_UNLOCK(flags);
lmPostGC(bp);
+ LCACHE_LOCK(flags); /* disable+lock */
}
/*
@@ -2302,9 +2296,11 @@ static void lbmIODone(struct bio *bio)
assert(bp->l_flag & lbmRELEASE);
assert(bp->l_flag & lbmFREE);
lbmfree(bp);
-
- LCACHE_UNLOCK(flags); /* unlock+enable */
}
+
+out:
+ bp->l_flag |= lbmDONE;
+ LCACHE_UNLOCK(flags);
}
int jfsIOWait(void *arg)
--
2.43.0