From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Rewrite get_entry_ip() to use the ftrace_get_symaddr() macro. This removes
the open-coded, CONFIG_X86_KERNEL_IBT-dependent ENDBR handling from
bpf_trace.c, since ftrace_get_symaddr() does the same conversion.
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
---
Changes in v19:
- Use ftrace_get_symaddr() instead of introducing new arch-dependent code.
- Also, replace the x86-specific code with ftrace_get_symaddr(), which does
  the same thing (a rough sketch of the fallback behaviour follows below).
---
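For reference, here is a rough user-space sketch of the fallback behaviour
the new get_entry_ip() relies on. It assumes the generic ftrace_get_symaddr()
yields 0 when the architecture provides no symbol-address conversion (which
is what the "ret ? : fentry_ip" fallback suggests); the stub macro below is
only a stand-in for illustration, not the kernel definition.

#include <stdio.h>

/* Stand-in for the generic fallback; the real macro is arch-defined. */
#ifndef ftrace_get_symaddr
#define ftrace_get_symaddr(fentry_ip)	(0UL)
#endif

/*
 * Mirrors the new get_entry_ip(): use the converted symbol address if the
 * architecture could provide one, otherwise keep fentry_ip unchanged.
 * "ret ? : fentry_ip" is the GNU ?: extension for "ret ? ret : fentry_ip".
 */
static unsigned long get_entry_ip(unsigned long fentry_ip)
{
	unsigned long ret = ftrace_get_symaddr(fentry_ip);

	return ret ? : fentry_ip;
}

int main(void)
{
	/* With the stub yielding 0, an arbitrary example address is returned as-is. */
	printf("%#lx\n", get_entry_ip(0xffffffff81000010UL));
	return 0;
}

With CONFIG_X86_KERNEL_IBT, the arch implementation behind ftrace_get_symaddr()
is expected to perform the same ENDBR adjustment that was open-coded here
before, so callers see an unchanged result.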
kernel/trace/bpf_trace.c | 19 ++-----------------
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 1532e9172bf9..e848a782bc8d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1024,27 +1024,12 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-#ifdef CONFIG_X86_KERNEL_IBT
 static unsigned long get_entry_ip(unsigned long fentry_ip)
 {
-	u32 instr;
+	unsigned long ret = ftrace_get_symaddr(fentry_ip);
 
-	/* We want to be extra safe in case entry ip is on the page edge,
-	 * but otherwise we need to avoid get_kernel_nofault()'s overhead.
-	 */
-	if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
-		if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
-			return fentry_ip;
-	} else {
-		instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
-	}
-	if (is_endbr(instr))
-		fentry_ip -= ENDBR_INSN_SIZE;
-	return fentry_ip;
+	return ret ? : fentry_ip;
 }
-#else
-#define get_entry_ip(fentry_ip) fentry_ip
-#endif
 
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {