The locks kcov->lock and kcov_remote_lock can be acquired from
atomic contexts, such as instrumentation hooks invoked from interrupt
handlers.
On PREEMPT_RT-enabled kernels, spinlock_t is typically implemented
as a sleeping lock (e.g., mapped to an rt_mutex). Acquiring such a
lock in atomic context, where sleeping is not allowed, can lead to
system hangs or crashes.
To avoid this, convert both locks to raw_spinlock_t, which always
provides non-sleeping spinlock semantics regardless of preemption model.
Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
---
kernel/kcov.c | 58 +++++++++++++++++++++++++--------------------------
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 187ba1b80bda..7d9b53385d81 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -54,7 +54,7 @@ struct kcov {
*/
refcount_t refcount;
/* The lock protects mode, size, area and t. */
- spinlock_t lock;
+ raw_spinlock_t lock;
enum kcov_mode mode;
/* Size of arena (in long's). */
unsigned int size;
@@ -84,7 +84,7 @@ struct kcov_remote {
struct hlist_node hnode;
};
-static DEFINE_SPINLOCK(kcov_remote_lock);
+static DEFINE_RAW_SPINLOCK(kcov_remote_lock);
static DEFINE_HASHTABLE(kcov_remote_map, 4);
static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
@@ -406,7 +406,7 @@ static void kcov_remote_reset(struct kcov *kcov)
struct hlist_node *tmp;
unsigned long flags;
- spin_lock_irqsave(&kcov_remote_lock, flags);
+ raw_spin_lock_irqsave(&kcov_remote_lock, flags);
hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
if (remote->kcov != kcov)
continue;
@@ -415,7 +415,7 @@ static void kcov_remote_reset(struct kcov *kcov)
}
/* Do reset before unlock to prevent races with kcov_remote_start(). */
kcov_reset(kcov);
- spin_unlock_irqrestore(&kcov_remote_lock, flags);
+ raw_spin_unlock_irqrestore(&kcov_remote_lock, flags);
}
static void kcov_disable(struct task_struct *t, struct kcov *kcov)
@@ -450,7 +450,7 @@ void kcov_task_exit(struct task_struct *t)
if (kcov == NULL)
return;
- spin_lock_irqsave(&kcov->lock, flags);
+ raw_spin_lock_irqsave(&kcov->lock, flags);
kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
/*
* For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
@@ -475,12 +475,12 @@ void kcov_task_exit(struct task_struct *t)
* By combining all three checks into one we get:
*/
if (WARN_ON(kcov->t != t)) {
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
return;
}
/* Just to not leave dangling references behind. */
kcov_disable(t, kcov);
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
kcov_put(kcov);
}
@@ -492,14 +492,14 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
struct page *page;
unsigned long flags;
- spin_lock_irqsave(&kcov->lock, flags);
+ raw_spin_lock_irqsave(&kcov->lock, flags);
size = kcov->size * sizeof(unsigned long);
if (kcov->area == NULL || vma->vm_pgoff != 0 ||
vma->vm_end - vma->vm_start != size) {
res = -EINVAL;
goto exit;
}
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
vm_flags_set(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off);
@@ -511,7 +511,7 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
}
return 0;
exit:
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
return res;
}
@@ -525,7 +525,7 @@ static int kcov_open(struct inode *inode, struct file *filep)
kcov->mode = KCOV_MODE_DISABLED;
kcov->sequence = 1;
refcount_set(&kcov->refcount, 1);
- spin_lock_init(&kcov->lock);
+ raw_spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
}
@@ -646,18 +646,18 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
kcov->t = t;
kcov->remote = true;
kcov->remote_size = remote_arg->area_size;
- spin_lock_irqsave(&kcov_remote_lock, flags);
+ raw_spin_lock_irqsave(&kcov_remote_lock, flags);
for (i = 0; i < remote_arg->num_handles; i++) {
if (!kcov_check_handle(remote_arg->handles[i],
false, true, false)) {
- spin_unlock_irqrestore(&kcov_remote_lock,
+ raw_spin_unlock_irqrestore(&kcov_remote_lock,
flags);
kcov_disable(t, kcov);
return -EINVAL;
}
remote = kcov_remote_add(kcov, remote_arg->handles[i]);
if (IS_ERR(remote)) {
- spin_unlock_irqrestore(&kcov_remote_lock,
+ raw_spin_unlock_irqrestore(&kcov_remote_lock,
flags);
kcov_disable(t, kcov);
return PTR_ERR(remote);
@@ -666,7 +666,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
if (remote_arg->common_handle) {
if (!kcov_check_handle(remote_arg->common_handle,
true, false, false)) {
- spin_unlock_irqrestore(&kcov_remote_lock,
+ raw_spin_unlock_irqrestore(&kcov_remote_lock,
flags);
kcov_disable(t, kcov);
return -EINVAL;
@@ -674,14 +674,14 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
remote = kcov_remote_add(kcov,
remote_arg->common_handle);
if (IS_ERR(remote)) {
- spin_unlock_irqrestore(&kcov_remote_lock,
+ raw_spin_unlock_irqrestore(&kcov_remote_lock,
flags);
kcov_disable(t, kcov);
return PTR_ERR(remote);
}
t->kcov_handle = remote_arg->common_handle;
}
- spin_unlock_irqrestore(&kcov_remote_lock, flags);
+ raw_spin_unlock_irqrestore(&kcov_remote_lock, flags);
/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
kcov_get(kcov);
return 0;
@@ -716,16 +716,16 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
area = vmalloc_user(size * sizeof(unsigned long));
if (area == NULL)
return -ENOMEM;
- spin_lock_irqsave(&kcov->lock, flags);
+ raw_spin_lock_irqsave(&kcov->lock, flags);
if (kcov->mode != KCOV_MODE_DISABLED) {
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
vfree(area);
return -EBUSY;
}
kcov->area = area;
kcov->size = size;
kcov->mode = KCOV_MODE_INIT;
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
return 0;
case KCOV_REMOTE_ENABLE:
if (get_user(remote_num_handles, (unsigned __user *)(arg +
@@ -749,9 +749,9 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
* All other commands can be normally executed under a spin lock, so we
* obtain and release it here in order to simplify kcov_ioctl_locked().
*/
- spin_lock_irqsave(&kcov->lock, flags);
+ raw_spin_lock_irqsave(&kcov->lock, flags);
res = kcov_ioctl_locked(kcov, cmd, arg);
- spin_unlock_irqrestore(&kcov->lock, flags);
+ raw_spin_unlock_irqrestore(&kcov->lock, flags);
kfree(remote_arg);
return res;
}
@@ -883,10 +883,10 @@ void kcov_remote_start(u64 handle)
return;
}
- spin_lock(&kcov_remote_lock);
+ raw_spin_lock(&kcov_remote_lock);
remote = kcov_remote_find(handle);
if (!remote) {
- spin_unlock(&kcov_remote_lock);
+ raw_spin_unlock(&kcov_remote_lock);
local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
return;
}
@@ -908,7 +908,7 @@ void kcov_remote_start(u64 handle)
size = CONFIG_KCOV_IRQ_AREA_SIZE;
area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
}
- spin_unlock(&kcov_remote_lock);
+ raw_spin_unlock(&kcov_remote_lock);
/* Can only happen when in_task(). */
if (!area) {
@@ -1037,19 +1037,19 @@ void kcov_remote_stop(void)
kcov_remote_softirq_stop(t);
}
- spin_lock(&kcov->lock);
+ raw_spin_lock(&kcov->lock);
/*
* KCOV_DISABLE could have been called between kcov_remote_start()
* and kcov_remote_stop(), hence the sequence check.
*/
if (sequence == kcov->sequence && kcov->remote)
kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
- spin_unlock(&kcov->lock);
+ raw_spin_unlock(&kcov->lock);
if (in_task()) {
- spin_lock(&kcov_remote_lock);
+ raw_spin_lock(&kcov_remote_lock);
kcov_remote_area_put(area, size);
- spin_unlock(&kcov_remote_lock);
+ raw_spin_unlock(&kcov_remote_lock);
}
local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
--
2.50.0
On Sun, 3 Aug 2025 07:20:43 +0000 Yunseong Kim <ysk@kzalloc.com> wrote:

> The locks kcov->lock and kcov_remote_lock can be acquired from
> atomic contexts, such as instrumentation hooks invoked from interrupt
> handlers.
>
> On PREEMPT_RT-enabled kernels, spinlock_t is typically implemented

On PREEMPT_RT is implemented as a sleeping lock. You don't need to say
"typically".

> as a sleeping lock (e.g., mapped to an rt_mutex). Acquiring such a
> lock in atomic context, where sleeping is not allowed, can lead to
> system hangs or crashes.
>
> To avoid this, convert both locks to raw_spinlock_t, which always
> provides non-sleeping spinlock semantics regardless of preemption model.
>
> Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
> ---
>  kernel/kcov.c | 58 +++++++++++++++++++++++++--------------------------
>  1 file changed, 29 insertions(+), 29 deletions(-)
>
> diff --git a/kernel/kcov.c b/kernel/kcov.c
> index 187ba1b80bda..7d9b53385d81 100644
> --- a/kernel/kcov.c
> +++ b/kernel/kcov.c
> @@ -54,7 +54,7 @@ struct kcov {
>  	 */
>  	refcount_t refcount;
>  	/* The lock protects mode, size, area and t. */
> -	spinlock_t lock;
> +	raw_spinlock_t lock;
>  	enum kcov_mode mode;
>  	/* Size of arena (in long's). */
>  	unsigned int size;
> @@ -84,7 +84,7 @@ struct kcov_remote {
>  	struct hlist_node hnode;
>  };
>
> -static DEFINE_SPINLOCK(kcov_remote_lock);
> +static DEFINE_RAW_SPINLOCK(kcov_remote_lock);
>  static DEFINE_HASHTABLE(kcov_remote_map, 4);
>  static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
>
> @@ -406,7 +406,7 @@ static void kcov_remote_reset(struct kcov *kcov)
>  	struct hlist_node *tmp;
>  	unsigned long flags;
>
> -	spin_lock_irqsave(&kcov_remote_lock, flags);
> +	raw_spin_lock_irqsave(&kcov_remote_lock, flags);

Not related to these patches, but have you thought about converting some of
these locks over to the "guard()" infrastructure provided by cleanup.h?

>  	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
>  		if (remote->kcov != kcov)
>  			continue;

Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>

-- Steve
Hi Steve,

Thanks for the review and the suggestion.

On 8/5/25 1:27 AM, Steven Rostedt wrote:
> On Sun, 3 Aug 2025 07:20:43 +0000
> Yunseong Kim <ysk@kzalloc.com> wrote:
>
>> The locks kcov->lock and kcov_remote_lock can be acquired from
>> atomic contexts, such as instrumentation hooks invoked from interrupt
>> handlers.
>>
>> On PREEMPT_RT-enabled kernels, spinlock_t is typically implemented
>
> On PREEMPT_RT is implemented as a sleeping lock. You don't need to say
> "typically".

You're right — the phrase "typically implemented as a sleeping lock" was
inaccurate. On PREEMPT_RT, spinlock_t is implemented as a sleeping lock,
and I'll make sure to correct that wording in the next version.

>> as a sleeping lock (e.g., mapped to an rt_mutex). Acquiring such a
>> lock in atomic context, where sleeping is not allowed, can lead to
>> system hangs or crashes.
>>
>> To avoid this, convert both locks to raw_spinlock_t, which always
>> provides non-sleeping spinlock semantics regardless of preemption model.
>>
>> Signed-off-by: Yunseong Kim <ysk@kzalloc.com>
>> ---
>>  kernel/kcov.c | 58 +++++++++++++++++++++++++--------------------------
>>  1 file changed, 29 insertions(+), 29 deletions(-)
>>
>> diff --git a/kernel/kcov.c b/kernel/kcov.c
>> index 187ba1b80bda..7d9b53385d81 100644
>> --- a/kernel/kcov.c
>> +++ b/kernel/kcov.c
>> @@ -54,7 +54,7 @@ struct kcov {
>>  	 */
>>  	refcount_t refcount;
>>  	/* The lock protects mode, size, area and t. */
>> -	spinlock_t lock;
>> +	raw_spinlock_t lock;
>>  	enum kcov_mode mode;
>>  	/* Size of arena (in long's). */
>>  	unsigned int size;
>> @@ -84,7 +84,7 @@ struct kcov_remote {
>>  	struct hlist_node hnode;
>>  };
>>
>> -static DEFINE_SPINLOCK(kcov_remote_lock);
>> +static DEFINE_RAW_SPINLOCK(kcov_remote_lock);
>>  static DEFINE_HASHTABLE(kcov_remote_map, 4);
>>  static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
>>
>> @@ -406,7 +406,7 @@ static void kcov_remote_reset(struct kcov *kcov)
>>  	struct hlist_node *tmp;
>>  	unsigned long flags;
>>
>> -	spin_lock_irqsave(&kcov_remote_lock, flags);
>> +	raw_spin_lock_irqsave(&kcov_remote_lock, flags);
>
> Not related to these patches, but have you thought about converting some of
> these locks over to the "guard()" infrastructure provided by cleanup.h?

Also, I appreciate your note about the guard() infrastructure from cleanup.h.
I'll look into whether it's applicable in this context, and plan to adopt it
where appropriate in the next iteration of the series.

>>  	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
>>  		if (remote->kcov != kcov)
>>  			continue;
>
> Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
>
> -- Steve

Thanks again for the feedback and for the Reviewed-by tag!

Best regards,
Yunseong Kim
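[For reference, a minimal sketch of what the guard()-based form of
kcov_remote_reset() could look like, assuming the raw_spinlock_irqsave lock
guard provided through <linux/cleanup.h>/<linux/spinlock.h>. This is
illustrative only, not part of the posted patch, and the per-entry teardown
inside the loop is elided.]

	/* Illustrative sketch only, not part of the posted patch. */
	static void kcov_remote_reset(struct kcov *kcov)
	{
		struct kcov_remote *remote;
		struct hlist_node *tmp;
		int bkt;

		/* The guard releases kcov_remote_lock automatically on return. */
		guard(raw_spinlock_irqsave)(&kcov_remote_lock);

		hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
			if (remote->kcov != kcov)
				continue;
			/* ... per-entry teardown as in the existing code ... */
		}
		/* Do reset before unlock to prevent races with kcov_remote_start(). */
		kcov_reset(kcov);
	}

[The explicit "unsigned long flags" variable and the manual unlock on the
return path go away, which is the main readability benefit guard() would
bring here.]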