kernel/trace/trace_kprobe.c | 4 ++++ 1 file changed, 4 insertions(+)
From: sunliming <sunliming@kylinos.cn>
In enable_boot_kprobe_events(), return directly when the trace kprobe list is
empty, thereby reducing the function's execution time. This function may
otherwise wait for the event_mutex lock for tens of milliseconds on certain
machines, which is unnecessary when no trace kprobes have been defined.
Signed-off-by: sunliming <sunliming@kylinos.cn>
---
v2:
- wrap the null check on the dyn_event_list with the macro trace_kprobe_list_empty
---
kernel/trace/trace_kprobe.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9953506370a5..95f2c42603d5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -82,6 +82,7 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
#define for_each_trace_kprobe(pos, dpos) \
for_each_dyn_event(dpos) \
if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
+#define trace_kprobe_list_empty() list_empty(&dyn_event_list)
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
@@ -1982,6 +1983,9 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk;
struct dyn_event *pos;
+ if (trace_kprobe_list_empty())
+ return;
+
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list)
--
2.25.1
On Tue, 27 Jan 2026 13:38:48 +0800
sunliming@linux.dev wrote:
> From: sunliming <sunliming@kylinos.cn>
>
> In enable_boot_kprobe_events(), return directly when the trace kprobe list is
> empty, thereby reducing the function's execution time. This function may
> otherwise wait for the event_mutex lock for tens of milliseconds on certain
> machines, which is unnecessary when no trace kprobes have been defined.
>
> Signed-off-by: sunliming <sunliming@kylinos.cn>
Looks good to me. Let me pick it.
Thanks,
> ---
> v2:
> - wrap the null check on the dyn_event_list with the macro trace_kprobe_list_empty
> ---
> kernel/trace/trace_kprobe.c | 4 ++++
> 1 file changed, 4 insertions(+)
>
> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> index 9953506370a5..95f2c42603d5 100644
> --- a/kernel/trace/trace_kprobe.c
> +++ b/kernel/trace/trace_kprobe.c
> @@ -82,6 +82,7 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
> #define for_each_trace_kprobe(pos, dpos) \
> for_each_dyn_event(dpos) \
> if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
> +#define trace_kprobe_list_empty() list_empty(&dyn_event_list)
>
> static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
> {
> @@ -1982,6 +1983,9 @@ static __init void enable_boot_kprobe_events(void)
> struct trace_kprobe *tk;
> struct dyn_event *pos;
>
> + if (trace_kprobe_list_empty())
> + return;
> +
> guard(mutex)(&event_mutex);
> for_each_trace_kprobe(tk, pos) {
> list_for_each_entry(file, &tr->events, list)
> --
> 2.25.1
>
--
Masami Hiramatsu (Google) <mhiramat@kernel.org>
© 2016 - 2026 Red Hat, Inc.