From: "Masami Hiramatsu (Google)"
To: Alexei Starovoitov, Steven Rostedt, Florent Revest
Cc: linux-trace-kernel@vger.kernel.org, LKML, Martin KaFai Lau, bpf,
 Sven Schnelle, Alexei Starovoitov, Jiri Olsa, Arnaldo Carvalho de Melo,
 Daniel Borkmann, Alan Maguire, Mark Rutland, Peter Zijlstra,
 Thomas Gleixner, Guo Ren
Subject: [RFC PATCH v2 12/31] function_graph: Have the instances use their own ftrace_ops for filtering
Date: Wed, 8 Nov 2023 23:26:42 +0900
Message-Id: <169945360154.55307.2938894711228282149.stgit@devnote2>
In-Reply-To: <169945345785.55307.5003201137843449313.stgit@devnote2>
References: <169945345785.55307.5003201137843449313.stgit@devnote2>

From: Steven Rostedt (VMware)

Allow instances to have their own ftrace_ops as part of the fgraph_ops, so
that the function_graph tracer filters on the set_ftrace_filter file of the
instance and not on that of the top instance.

Signed-off-by: Steven Rostedt (VMware)
Signed-off-by: Masami Hiramatsu (Google)
---
 Changes in v2:
  - Use ftrace_graph_func and FTRACE_OPS_GRAPH_STUB instead of ftrace_stub
    and FTRACE_OPS_FL_STUB for the new ftrace based fgraph.
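A minimal usage sketch (not part of this patch, added for illustration):
with the ftrace_ops now embedded in fgraph_ops, an in-kernel fgraph user
can set a filter on its own ops before registering, instead of inheriting
the top-level filter. The entry/return callback signatures assume the
two-argument form used in this series; my_entry(), my_return(), my_gops,
the "kmalloc*" pattern and the late_initcall() hook are made up for the
example.

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/string.h>

/* Hypothetical callbacks, just to show the per-gops filtering flow. */
static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
	return 1;	/* trace this function */
}

static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
	/* .ops.func is left NULL so register_ftrace_graph() fills it in */
};

static int __init my_fgraph_init(void)
{
	/*
	 * The filter lives in this fgraph_ops' own hash, not in global_ops
	 * (ftrace_set_filter() needs CONFIG_DYNAMIC_FTRACE).
	 */
	ftrace_set_filter(&my_gops.ops, "kmalloc*", strlen("kmalloc*"), 0);
	return register_ftrace_graph(&my_gops);
}
late_initcall(my_fgraph_init);

This mirrors what an instance gets through tracefs: writing to the
instance's set_ftrace_filter fills tr->ops's hash, which fgraph_init_ops()
shares with the instance's gops->ops, so only the filtered functions reach
the graph callbacks.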
---
 include/linux/ftrace.h               |    1 +
 kernel/trace/fgraph.c                |   60 +++++++++++++++++++++-------------
 kernel/trace/ftrace.c                |    6 ++-
 kernel/trace/trace.h                 |   16 +++++----
 kernel/trace/trace_functions.c       |    2 +
 kernel/trace/trace_functions_graph.c |    8 +++--
 6 files changed, 58 insertions(+), 35 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 84e06ad1b121..d30eb8a97a50 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1069,6 +1069,7 @@ extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph
 struct fgraph_ops {
 	trace_func_graph_ent_t		entryfunc;
 	trace_func_graph_ret_t		retfunc;
+	struct ftrace_ops		ops; /* for the hash lists */
 	void				*private;
 };
 
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 16bbb9fa3e03..97cf320d20a8 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -17,14 +17,6 @@
 #include "ftrace_internal.h"
 #include "trace.h"
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-#define ASSIGN_OPS_HASH(opsname, val) \
-	.func_hash		= val, \
-	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
-#else
-#define ASSIGN_OPS_HASH(opsname, val)
-#endif
-
 #define FGRAPH_RET_SIZE sizeof(struct ftrace_ret_stack)
 #define FGRAPH_RET_INDEX (FGRAPH_RET_SIZE / sizeof(long))
 
@@ -337,9 +329,6 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 		return -EBUSY;
 #endif
 
-	if (!ftrace_ops_test(&global_ops, func, NULL))
-		return -EBUSY;
-
 	trace.func = func;
 	trace.depth = ++current->curr_ret_depth;
 
@@ -360,7 +349,8 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 			atomic_inc(&current->trace_overrun);
 			break;
 		}
-		if (fgraph_array[i]->entryfunc(&trace, fgraph_array[i])) {
+		if (ftrace_ops_test(&gops->ops, func, NULL) &&
+		    gops->entryfunc(&trace, gops)) {
 			offset = current->curr_ret_stack;
 			/* Check the top level stored word */
 			type = get_fgraph_type(current, offset - 1);
@@ -655,17 +645,25 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 }
 #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 
-static struct ftrace_ops graph_ops = {
-	.func			= ftrace_graph_func,
-	.flags			= FTRACE_OPS_FL_INITIALIZED |
-				   FTRACE_OPS_FL_PID |
-				   FTRACE_OPS_GRAPH_STUB,
+void fgraph_init_ops(struct ftrace_ops *dst_ops,
+		     struct ftrace_ops *src_ops)
+{
+	dst_ops->func = ftrace_graph_func;
+	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
+
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
-	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
+	dst_ops->trampoline = FTRACE_GRAPH_TRAMP_ADDR;
 	/* trampoline_size is only needed for dynamically allocated tramps */
 #endif
-	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (src_ops) {
+		dst_ops->func_hash = &src_ops->local_hash;
+		mutex_init(&dst_ops->local_hash.regex_lock);
+		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+	}
+#endif
+}
 
 void ftrace_graph_sleep_time_control(bool enable)
 {
@@ -870,11 +868,20 @@ static int start_graph_tracing(void)
 
 int register_ftrace_graph(struct fgraph_ops *gops)
 {
+	int command = 0;
 	int ret = 0;
 	int i;
 
 	mutex_lock(&ftrace_lock);
 
+	if (!gops->ops.func) {
+		gops->ops.flags |= FTRACE_OPS_GRAPH_STUB;
+		gops->ops.func = ftrace_graph_func;
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+		gops->ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
+#endif
+	}
+
 	if (!fgraph_array[0]) {
 		/* The array must always have real data on it */
 		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
@@ -910,9 +917,10 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 		 */
 		ftrace_graph_return = return_run;
 		ftrace_graph_entry = entry_run;
-
-		ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
+		command = FTRACE_START_FUNC_RET;
 	}
+
+	ret = ftrace_startup(&gops->ops, command);
 out:
 	mutex_unlock(&ftrace_lock);
 	return ret;
@@ -920,6 +928,7 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 
 void unregister_ftrace_graph(struct fgraph_ops *gops)
 {
+	int command = 0;
 	int i;
 
 	mutex_lock(&ftrace_lock);
@@ -942,10 +951,15 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
 	}
 
 	ftrace_graph_active--;
+
+	if (!ftrace_graph_active)
+		command = FTRACE_STOP_FUNC_RET;
+
+	ftrace_shutdown(&gops->ops, command);
+
 	if (!ftrace_graph_active) {
 		ftrace_graph_return = ftrace_stub_graph;
 		ftrace_graph_entry = ftrace_graph_entry_stub;
-		ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
 		unregister_pm_notifier(&ftrace_suspend_notifier);
 		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 	}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 83fbfb7b48f8..c4cc2a9d0047 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3050,6 +3050,8 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
 
+	ftrace_ops_init(ops);
+
 	ret = __register_ftrace_function(ops);
 	if (ret)
 		return ret;
@@ -7319,7 +7321,7 @@ __init void ftrace_init_global_array_ops(struct trace_array *tr)
 	tr->ops = &global_ops;
 	tr->ops->private = tr;
 	ftrace_init_trace_array(tr);
-	init_array_fgraph_ops(tr);
+	init_array_fgraph_ops(tr, tr->ops);
 }
 
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
@@ -8051,7 +8053,7 @@ static int register_ftrace_function_nolock(struct ftrace_ops *ops)
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
-	int ret;
+	int ret = -1;
 
 	lock_direct_mutex();
 	ret = prepare_direct_functions_for_ipmodify(ops);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index febb9c6d01c7..f77322e3b177 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -885,8 +885,8 @@ extern int __trace_graph_entry(struct trace_array *tr,
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
 				 unsigned int trace_ctx);
-extern void init_array_fgraph_ops(struct trace_array *tr);
-extern int allocate_fgraph_ops(struct trace_array *tr);
+extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
+extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
 extern void free_fgraph_ops(struct trace_array *tr);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -969,6 +969,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 	preempt_enable_notrace();
 	return ret;
 }
+
 #else
 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
@@ -994,18 +995,19 @@ static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
 		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
 }
 
+void fgraph_init_ops(struct ftrace_ops *dst_ops,
+		     struct ftrace_ops *src_ops);
+
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
 	return TRACE_TYPE_UNHANDLED;
 }
-static inline void init_array_fgraph_ops(struct trace_array *tr) { }
-static inline int allocate_fgraph_ops(struct trace_array *tr)
-{
-	return 0;
-}
 static inline void free_fgraph_ops(struct trace_array *tr) { }
+/* ftrace_ops may not be defined */
+#define init_array_fgraph_ops(tr, ops) do { } while (0)
+#define allocate_fgraph_ops(tr, ops) ({ 0; })
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 extern struct list_head ftrace_pids;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 8e8da0d0ee52..13bf2415245d 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -91,7 +91,7 @@ int ftrace_create_function_files(struct trace_array *tr,
 	if (!tr->ops)
 		return -EINVAL;
 
-	ret = allocate_fgraph_ops(tr);
+	ret = allocate_fgraph_ops(tr, tr->ops);
 	if (ret) {
 		kfree(tr->ops);
 		return ret;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 9ccc904a7703..7f30652f0e97 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -288,7 +288,7 @@ static struct fgraph_ops funcgraph_ops = {
 	.retfunc = &trace_graph_return,
 };
 
-int allocate_fgraph_ops(struct trace_array *tr)
+int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
 {
 	struct fgraph_ops *gops;
 
@@ -301,6 +301,9 @@ int allocate_fgraph_ops(struct trace_array *tr)
 
 	tr->gops = gops;
 	gops->private = tr;
+
+	fgraph_init_ops(&gops->ops, ops);
+
 	return 0;
 }
 
@@ -309,10 +312,11 @@ void free_fgraph_ops(struct trace_array *tr)
 	kfree(tr->gops);
 }
 
-__init void init_array_fgraph_ops(struct trace_array *tr)
+__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
 {
 	tr->gops = &funcgraph_ops;
 	funcgraph_ops.private = tr;
+	fgraph_init_ops(&tr->gops->ops, ops);
 }
 
 static int graph_trace_init(struct trace_array *tr)