[RESEND] [GIT PULL] Probes: Updates for v7.0
Posted by Masami Hiramatsu (Google) 1 month, 2 weeks ago
Hi Linus,

Just to kick the process along, I am resending this pull request.
I'm sorry for making you go through the trouble twice.


Probes for v7.0

- kprobes: Use a dedicated kernel thread to optimize kprobes instead
  of a workqueue thread. Since the kprobe optimizer waits a long time
  in synchronize_rcu_tasks(), it can block other workers in the same
  queue when it runs from a workqueue. (A minimal sketch of the new
  pattern follows this list.)

- kprobe-events: Return immediately if no new probe events are
  specified on the kernel command line at boot time. This shortens
  the kernel boot time.

- kprobes: When a kprobe is fully removed from the kernel code,
  retry optimizing any other kprobe that was blocked by it.
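
For reference, the core of the first change is just a kthread parked
on a wait queue and kicked through an atomic state word. Here is a
minimal, self-contained module-style sketch of that pattern (the
names opt_kick()/opt_thread() are hypothetical; this is not the
patch code itself):

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/wait.h>

enum { ST_IDLE, ST_KICKED };

static struct task_struct *opt_task;
static DECLARE_WAIT_QUEUE_HEAD(opt_wait);
static atomic_t opt_state = ATOMIC_INIT(ST_IDLE);

static int opt_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* Sleep interruptibly so the hung-task detector ignores us. */
		wait_event_interruptible(opt_wait,
			atomic_read(&opt_state) != ST_IDLE ||
			kthread_should_stop());
		if (kthread_should_stop())
			break;
		/*
		 * Consume the kick before working. A kick arriving during
		 * the pass flips the state back to ST_KICKED, so the next
		 * loop iteration runs again instead of sleeping; no wakeup
		 * is lost.
		 */
		atomic_set(&opt_state, ST_IDLE);
		/* ... long-running pass here (e.g. synchronize_rcu_tasks()) ... */
	}
	return 0;
}

/* Wake the thread only on an IDLE -> KICKED transition. */
static void opt_kick(void)
{
	if (atomic_cmpxchg(&opt_state, ST_IDLE, ST_KICKED) == ST_IDLE)
		wake_up(&opt_wait);
}

static int __init opt_init(void)
{
	opt_task = kthread_run(opt_thread, NULL, "opt-sketch");
	if (IS_ERR(opt_task))
		return PTR_ERR(opt_task);
	opt_kick();	/* demo kick */
	return 0;
}

static void __exit opt_exit(void)
{
	kthread_stop(opt_task);
}

module_init(opt_init);
module_exit(opt_exit);
MODULE_LICENSE("GPL");

The cmpxchg ensures only the first kick issues a wake-up; kicks that
arrive while a pass is running are simply picked up by the next loop
iteration.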


Please pull the latest probes-v7.0 tree, which can be found at:


  git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git
probes-v7.0

Tag SHA1: d5984c30c657c796837a3e7667b3086246a43a68
Head SHA1: 73c12f209462d1712c5f55f3021a1b65b2e084c3


Masami Hiramatsu (Google) (1):
      kprobes: Use dedicated kthread for kprobe optimizer

hongao (1):
      kprobes: retry blocked optprobe in do_free_cleaned_kprobes

sunliming (1):
      tracing: kprobe-event: Return directly when trace kprobes is empty

----
 kernel/kprobes.c            | 124 ++++++++++++++++++++++++++++++++++----------
 kernel/trace/trace_kprobe.c |   4 ++
 2 files changed, 102 insertions(+), 26 deletions(-)
---------------------------
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab8f9fc1f0d1..e2cd01cf5968 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -32,6 +32,7 @@
 #include <linux/debugfs.h>
 #include <linux/sysctl.h>
 #include <linux/kdebug.h>
+#include <linux/kthread.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
@@ -40,6 +41,7 @@
 #include <linux/perf_event.h>
 #include <linux/execmem.h>
 #include <linux/cleanup.h>
+#include <linux/wait.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -514,8 +516,18 @@ static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
 static LIST_HEAD(freeing_list);
 
-static void kprobe_optimizer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+static void optimize_kprobe(struct kprobe *p);
+static struct task_struct *kprobe_optimizer_task;
+static wait_queue_head_t kprobe_optimizer_wait;
+static atomic_t optimizer_state;
+enum {
+	OPTIMIZER_ST_IDLE = 0,
+	OPTIMIZER_ST_KICKED = 1,
+	OPTIMIZER_ST_FLUSHING = 2,
+};
+
+static DECLARE_COMPLETION(optimizer_completion);
+
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -593,18 +605,25 @@ static void do_free_cleaned_kprobes(void)
 			 */
 			continue;
 		}
+
+		/*
+		 * The aggregator was holding back another probe while it sat on the
+		 * unoptimizing/freeing lists.  Now that the aggregator has been fully
+		 * reverted we can safely retry the optimization of that sibling.
+		 */
+
+		struct kprobe *_p = get_optimized_kprobe(op->kp.addr);
+		if (unlikely(_p))
+			optimize_kprobe(_p);
+
 		free_aggr_kprobe(&op->kp);
 	}
 }
 
-/* Start optimizer after OPTIMIZE_DELAY passed */
-static void kick_kprobe_optimizer(void)
-{
-	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
-}
+static void kick_kprobe_optimizer(void);
 
 /* Kprobe jump optimizer */
-static void kprobe_optimizer(struct work_struct *work)
+static void kprobe_optimizer(void)
 {
 	guard(mutex)(&kprobe_mutex);
 
@@ -635,9 +654,53 @@ static void kprobe_optimizer(struct work_struct *work)
 		do_free_cleaned_kprobes();
 	}
 
-	/* Step 5: Kick optimizer again if needed */
+	/* Step 5: If someone is flushing the optimizer, signal completion. */
+	if (!completion_done(&optimizer_completion))
+		complete(&optimizer_completion);
+
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
-		kick_kprobe_optimizer();
+		kick_kprobe_optimizer();	/* normal kick */
+}
+
+static int kprobe_optimizer_thread(void *data)
+{
+	while (!kthread_should_stop()) {
+		/* Wait interruptibly so the hung-task detector does not fire. */
+		wait_event_interruptible(kprobe_optimizer_wait,
+			   atomic_read(&optimizer_state) != OPTIMIZER_ST_IDLE ||
+			   kthread_should_stop());
+
+		if (kthread_should_stop())
+			break;
+
+		/*
+		 * If it was a normal kick, wait for OPTIMIZE_DELAY.
+		 * This wait can be interrupted by a flush request.
+		 */
+		if (atomic_read(&optimizer_state) == OPTIMIZER_ST_KICKED)
+			wait_event_interruptible_timeout(
+				kprobe_optimizer_wait,
+				atomic_read(&optimizer_state) == OPTIMIZER_ST_FLUSHING ||
+				kthread_should_stop(),
+				OPTIMIZE_DELAY);
+
+		if (kthread_should_stop())
+			break;
+
+		atomic_set(&optimizer_state, OPTIMIZER_ST_IDLE);
+
+		kprobe_optimizer();
+	}
+	return 0;
+}
+
+/* Start optimizer after OPTIMIZE_DELAY passed */
+static void kick_kprobe_optimizer(void)
+{
+	lockdep_assert_held(&kprobe_mutex);
+	if (atomic_cmpxchg(&optimizer_state,
+		OPTIMIZER_ST_IDLE, OPTIMIZER_ST_KICKED) == OPTIMIZER_ST_IDLE)
+		wake_up(&kprobe_optimizer_wait);
 }
 
 static void wait_for_kprobe_optimizer_locked(void)
@@ -645,13 +708,17 @@ static void wait_for_kprobe_optimizer_locked(void)
 	lockdep_assert_held(&kprobe_mutex);
 
 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
-		mutex_unlock(&kprobe_mutex);
-
-		/* This will also make 'optimizing_work' execute immmediately */
-		flush_delayed_work(&optimizing_work);
-		/* 'optimizing_work' might not have been queued yet, relax */
-		cpu_relax();
+		reinit_completion(&optimizer_completion);
+		/*
+		 * Set state to OPTIMIZER_ST_FLUSHING and wake up the thread if it's
+		 * idle. If it's already kicked, it will see the state change.
+		 */
+		if (atomic_xchg_acquire(&optimizer_state,
+			OPTIMIZER_ST_FLUSHING) != OPTIMIZER_ST_FLUSHING)
+			wake_up(&kprobe_optimizer_wait);
 
+		mutex_unlock(&kprobe_mutex);
+		wait_for_completion(&optimizer_completion);
 		mutex_lock(&kprobe_mutex);
 	}
 }
@@ -1002,16 +1069,23 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 		if (unlikely(_p) && reopt)
 			optimize_kprobe(_p);
 	}
-	/*
-	 * TODO: Since unoptimization and real disarming will be done by
-	 * the worker thread, we can not check whether another probe are
-	 * unoptimized because of this probe here. It should be re-optimized
-	 * by the worker thread.
-	 */
 }
 
+static void __init init_optprobe(void)
+{
+#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
+	/* Init 'kprobe_optinsn_slots' for allocation */
+	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+
+	init_waitqueue_head(&kprobe_optimizer_wait);
+	atomic_set(&optimizer_state, OPTIMIZER_ST_IDLE);
+	kprobe_optimizer_task = kthread_run(kprobe_optimizer_thread, NULL,
+					    "kprobe-optimizer");
+}
 #else /* !CONFIG_OPTPROBES */
 
+#define init_optprobe()				do {} while (0)
 #define optimize_kprobe(p)			do {} while (0)
 #define unoptimize_kprobe(p, f)			do {} while (0)
 #define kill_optimized_kprobe(p)		do {} while (0)
@@ -2694,10 +2768,8 @@ static int __init init_kprobes(void)
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
-	/* Init 'kprobe_optinsn_slots' for allocation */
-	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-#endif
+	/* Initialize the optimization infrastructure */
+	init_optprobe();
 
 	err = arch_init_kprobes();
 	if (!err)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9953506370a5..95f2c42603d5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -82,6 +82,7 @@ static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
 #define for_each_trace_kprobe(pos, dpos)	\
 	for_each_dyn_event(dpos)		\
 		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
+#define trace_kprobe_list_empty() list_empty(&dyn_event_list)
 
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
@@ -1982,6 +1983,9 @@ static __init void enable_boot_kprobe_events(void)
 	struct trace_kprobe *tk;
 	struct dyn_event *pos;
 
+	if (trace_kprobe_list_empty())
+		return;
+
 	guard(mutex)(&event_mutex);
 	for_each_trace_kprobe(tk, pos) {
 		list_for_each_entry(file, &tr->events, list)

-- 
Masami Hiramatsu (Google) <mhiramat@kernel.org>

Re: [RESEND] [GIT PULL] Probes: Updates for v7.0
Posted by pr-tracker-bot@kernel.org 1 month, 2 weeks ago
The pull request you sent on Tue, 17 Feb 2026 11:01:21 +0900:

> git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git probes-v7.0

has been merged into torvalds/linux.git:
https://git.kernel.org/torvalds/c/2d10a488717e1b314d332f05b5966f7c25716a11

Thank you!

-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/prtracker.html