When adding a kprobe such as "p:probe/tcp_sendmsg _text+15392192",
arch_check_kprobe would iterate over all instructions from _text up to
the probed address. Not only is this very inefficient, but literal
values in that range (e.g. left behind by function patching) are
misinterpreted as instructions, which desynchronizes the walk.

Fix this by doing it like x86: start the iteration at the closest
preceding symbol instead of at the given starting point.
Fixes: 87f48c7ccc73 ("riscv: kprobe: Fixup kernel panic when probing an illegal position")
Signed-off-by: Fabian Vogt <fvogt@suse.de>
Signed-off-by: Marvin Friedrich <marvin.friedrich@suse.com>
---
arch/riscv/kernel/probes/kprobes.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index c0738d6c6498..8723390c7cad 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -49,10 +49,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
post_kprobe_handler(p, kcb, regs);
}
-static bool __kprobes arch_check_kprobe(struct kprobe *p)
+static bool __kprobes arch_check_kprobe(unsigned long addr)
{
- unsigned long tmp = (unsigned long)p->addr - p->offset;
- unsigned long addr = (unsigned long)p->addr;
+ unsigned long tmp, offset;
+
+ /* start iterating at the closest preceding symbol */
+ if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
+ return false;
+
+ tmp = addr - offset;
while (tmp <= addr) {
if (tmp == addr)
@@ -71,7 +76,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if ((unsigned long)insn & 0x1)
return -EILSEQ;
- if (!arch_check_kprobe(p))
+ if (!arch_check_kprobe((unsigned long)p->addr))
return -EILSEQ;
/* copy instruction */
--
2.51.0
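
For reference, here is roughly how arch_check_kprobe() reads with this
patch applied. This is only a sketch, not authoritative tree contents:
the loop body is not part of the hunks above and is reproduced here from
the pre-existing upstream code, which steps through the function by
decoded instruction length and only accepts addresses that land on an
instruction boundary.

static bool __kprobes arch_check_kprobe(unsigned long addr)
{
	unsigned long tmp, offset;

	/* start iterating at the closest preceding symbol */
	if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
		return false;

	tmp = addr - offset;

	while (tmp <= addr) {
		if (tmp == addr)
			return true;

		/* step by the decoded length (2 or 4 bytes) of each insn */
		tmp += GET_INSN_LENGTH(*(u16 *)tmp);
	}

	return false;
}

With kallsyms_lookup_size_offset() supplying the offset into the
containing symbol, the walk now starts at that symbol rather than at
whatever base the probe string named (such as _text), so only bytes
inside the target function are ever decoded.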
On Wed, Sep 10, 2025 at 11:25 PM Fabian Vogt <fvogt@suse.de> wrote:
>
> When adding a kprobe such as "p:probe/tcp_sendmsg _text+15392192",
> arch_check_kprobe would iterate over all instructions from _text up to
> the probed address. Not only is this very inefficient, but literal
> values in that range (e.g. left behind by function patching) are
> misinterpreted as instructions, which desynchronizes the walk.

LGTM! This prevents a bad p->offset from causing problems.

Acked-by: Guo Ren <guoren@kernel.org>

--
Best Regards
 Guo Ren