kernel/locking/rwsem.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+)
From: "buck.zhang" <buckzhang1212@yeah.net>
Here is a kernel exception about rwsem that I can reproduce reliably.
First, we define a struct that contains a rwsem.
Then we allocate this struct with kmalloc without calling init_rwsem.
Finally, when multiple tasks use the rwsem concurrently, the kernel panics.
The lock appears to work normally as long as only one task uses it at a time.
The reason for the exception is that sem->wait_list is an invalid kernel list.
I want to add more warnings when CONFIG_DEBUG_RWSEMS is enabled.
kernel crash log:
Unable to handle kernel NULL pointer dereference at virtual address 0000000
pc: rwsem_down_write_slowpath+0x428/0xccc
lr: rwsem_down_write_slowpath+0x844/0xccc
sp: ffffffc0870abb00
x29: ffffffc0870abb60 x28: 0000000000000000 x27: ffffffffffffff05
........
x2: ffffff809d57d830 x1 : 0000000000000000 x0 : 0000000000000000
Call trace:
rwsem_down_write_slowpath+0x428/0xccc
down_write+0xa8/0x108
Test case:
struct chip_rwsema {
struct rw_semaphore sem;
};
static void work_handler1(struct chip_rwsema *csem)
{
down_write(&(csem->sem));
}
static void work_handler2(struct chip_rwsema *csem)
{
down_write(&(csem->sem));
}
static void chip_tsem(void)
{
struct chip_rwsema *csem;
csem = kzalloc(sizeof(struct chip_rwsema),GFP_KERNEL);
work_handler1(csem);
......
work_handler2(csem);
}
Signed-off-by: buck.zhang <buckzhang1212@yeah.net>
---
kernel/locking/rwsem.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 24df4d98f..bfc038573 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -75,7 +75,17 @@
list_empty(&(sem)->wait_list) ? "" : "not ")) \
debug_locks_off(); \
} while (0)
+
+/*
+ * rwsem_waitlist_invalid - check whether sem->wait_list is invalid
+ * @head: the list head to test.
+ */
+static inline int rwsem_waitlist_invalid(const struct list_head *head)
+{
+ return (unsigned long) READ_ONCE(head->next) < PAGE_OFFSET;
+}
#else
+# define rwsem_waitlist_invalid(sem)
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif
@@ -1034,6 +1044,9 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter.handoff_set = false;
+ /* In case the rwsem is uninitialized, add more warning */
+ DEBUG_RWSEMS_WARN_ON(rwsem_waitlist_invalid(&sem->wait_list), sem);
+
raw_spin_lock_irq(&sem->wait_lock);
if (list_empty(&sem->wait_list)) {
/*
@@ -1128,6 +1141,9 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter.handoff_set = false;
+ /* In case the rwsem is uninitialized, add more warning */
+ DEBUG_RWSEMS_WARN_ON(rwsem_waitlist_invalid(&sem->wait_list), sem);
+
raw_spin_lock_irq(&sem->wait_lock);
rwsem_add_waiter(sem, &waiter);
--
2.17.1
On 9/27/25 2:47 AM, buckzhang1212@yeah.net wrote:
> From: "buck.zhang" <buckzhang1212@yeah.net>
>
> Here is a kernel exception about rwsem and I can recreate it stably.
> First We define a struct contains a rwsem.
> Then allocate this struct by kmalloc without calling init_rwsem.
> Finally when multiple tasks call use rwsem together,kernel will panic.
> This lock is used normally when only one task is using at a time.
> the exception reason is that sem->wait_list is an invalid kernel list.
> I want to add more warnings when enable CONFIG_DEBUG_RWSEMS
> kernel crash log:
> Unable to handle kernel NULL pointer dereference at virtual address 0000000
> pc: rwsem_down_write_slowpath+0x428/0xccc
> lr: rwsem_down_write_slowpath+0x844/0xccc
> sp: ffffffc0870abb00
> x29: ffffffc0870abb60 x28: 0000000000000000 x27: ffffffffffffff05
> ........
> x2: ffffff809d57d830 x1 : 0000000000000000 x0 : 0000000000000000
> Call trace:
> rwsem_down_write_slowpath+0x428/0xccc
> down_write+0xa8/0x108
> Test case:
> struct chip_rwsema {
> struct rwsema sem;
> };
> static void work_handler1(struct chip_rwsema *csem)
> {
> down_write(&(csem->sem));
> }
> static void work_handler2(struct chip_rwsema *csem)
> {
> down_write(&(csem->sem));
> }
> static void chip_tsem(void)
> {
> struct chip_rwsema *csem;
> csem = kzalloc(sizeof(struct chip_rwsema),GFP_KERNEL);
> work_handler1(csem);
> ......
> work_handler2(csem);
> }
>
> Signed-off-by: buck.zhang <buckzhang1212@yeah.net>
> ---
> kernel/locking/rwsem.c | 16 ++++++++++++++++
> 1 file changed, 16 insertions(+)
>
> diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
> index 24df4d98f..bfc038573 100644
> --- a/kernel/locking/rwsem.c
> +++ b/kernel/locking/rwsem.c
> @@ -75,7 +75,17 @@
> list_empty(&(sem)->wait_list) ? "" : "not ")) \
> debug_locks_off(); \
> } while (0)
> +
> +/*
> + * list_invalid - check whether sem->waitlist is invalid
> + * @head: the list to test.
> + */
> +static inline int rwsem_waitlist_invalid(const struct list_head *head)
> +{
> + return (unsigned long) READ_ONCE(head->next) < PAGE_OFFSET;
> +}
> #else
> +# define rwsem_waitlist_invalid(sem)
> # define DEBUG_RWSEMS_WARN_ON(c, sem)
> #endif
>
> @@ -1034,6 +1044,9 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
> waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
> waiter.handoff_set = false;
>
> + /* In case the rwsem is uninitiated, add more warning */
> + DEBUG_RWSEMS_WARN_ON(rwsem_waitlist_invalid(&sem->wait_list), sem);
> +
> raw_spin_lock_irq(&sem->wait_lock);
> if (list_empty(&sem->wait_list)) {
> /*
> @@ -1128,6 +1141,9 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
> waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
> waiter.handoff_set = false;
>
> + /* In case the rwsem is uninitiated, add more warning */
> + DEBUG_RWSEMS_WARN_ON(rwsem_waitlist_invalid(&sem->wait_list), sem);
> +
> raw_spin_lock_irq(&sem->wait_lock);
> rwsem_add_waiter(sem, &waiter);
>
I suppose you are trying to detect a call to down_read()/down_write()
before the rwsem has been properly initialized. The current way to
detect that is to check sem->magic. Unfortunately, this check may
not be run for down_read()/down_write() depending on whether
CONFIG_LOCK_STAT is set or not. Does the patch below work for
your test case?
Cheers,
Longman
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 24df4d98f7d2..7e06a6bbdd53 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1256,6 +1256,7 @@ static __always_inline int
__down_read_common(struct rw_semaphore *sem, int stat
int ret = 0;
long count;
+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
preempt_disable();
if (!rwsem_read_trylock(sem, &count)) {
if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
@@ -1312,6 +1313,7 @@ static __always_inline int
__down_write_common(struct rw_semaphore *sem, int sta
{
int ret = 0;
+ DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
preempt_disable();
if (unlikely(!rwsem_write_trylock(sem))) {
if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
© 2016 - 2026 Red Hat, Inc.