Replace trace_foo() with trace_call__foo() at call sites that are already
guarded by tracepoint_enabled() or trace_foo_enabled() checks, avoiding a
redundant static_branch_unlikely() re-evaluation inside the tracepoint.
- tick-sched.c: Multiple trace_tick_stop() calls are guarded by an early
return when tracepoint_enabled(tick_stop) is false.
- trace_benchmark.c: trace_benchmark_event() is guarded by an early return
when !trace_benchmark_event_enabled().
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Vineeth Pillai (Google) <vineeth@bitbyteword.org>
---
kernel/time/tick-sched.c | 12 ++++++------
kernel/trace/trace_benchmark.c | 2 +-
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7907fadd63f2..f8ab472e30858 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -348,32 +348,32 @@ static bool check_tick_dependency(atomic_t *dep)
return !val;
if (val & TICK_DEP_MASK_POSIX_TIMER) {
- trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
+ trace_call__tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
return true;
}
if (val & TICK_DEP_MASK_PERF_EVENTS) {
- trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
+ trace_call__tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
return true;
}
if (val & TICK_DEP_MASK_SCHED) {
- trace_tick_stop(0, TICK_DEP_MASK_SCHED);
+ trace_call__tick_stop(0, TICK_DEP_MASK_SCHED);
return true;
}
if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
- trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+ trace_call__tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
return true;
}
if (val & TICK_DEP_MASK_RCU) {
- trace_tick_stop(0, TICK_DEP_MASK_RCU);
+ trace_call__tick_stop(0, TICK_DEP_MASK_RCU);
return true;
}
if (val & TICK_DEP_MASK_RCU_EXP) {
- trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
+ trace_call__tick_stop(0, TICK_DEP_MASK_RCU_EXP);
return true;
}
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index e19c32f2a9381..189d383934fd3 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -51,7 +51,7 @@ static void trace_do_benchmark(void)
local_irq_disable();
start = trace_clock_local();
- trace_benchmark_event(bm_str, bm_last);
+ trace_call__benchmark_event(bm_str, bm_last);
stop = trace_clock_local();
local_irq_enable();
--
2.53.0