Adding support to attach a BPF program for the entry and return probe
of the same function. This is a common use case which at the moment
requires creating two uprobe multi links.
Adding a new BPF_TRACE_UPROBE_SESSION attach type that instructs the
kernel to attach a single link program to both the entry and exit probes.
It's possible to control execution of the BPF program on the return
probe simply through the return value of the entry BPF program:
returning zero executes the BPF program on the return probe, while
returning non-zero skips it.
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
---
include/uapi/linux/bpf.h | 1 +
kernel/bpf/syscall.c | 9 ++++++--
kernel/trace/bpf_trace.c | 39 +++++++++++++++++++++++++++-------
tools/include/uapi/linux/bpf.h | 1 +
tools/lib/bpf/libbpf.c | 1 +
5 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8ab4d8184b9d..77d0bc5fa986 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1116,6 +1116,7 @@ enum bpf_attach_type {
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
+ BPF_TRACE_UPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a8f1808a1ca5..0cf7617e6cb6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -3983,10 +3983,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
attach_type != BPF_TRACE_UPROBE_MULTI)
return -EINVAL;
+ if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION &&
+ attach_type != BPF_TRACE_UPROBE_SESSION)
+ return -EINVAL;
if (attach_type != BPF_PERF_EVENT &&
attach_type != BPF_TRACE_KPROBE_MULTI &&
attach_type != BPF_TRACE_KPROBE_SESSION &&
- attach_type != BPF_TRACE_UPROBE_MULTI)
+ attach_type != BPF_TRACE_UPROBE_MULTI &&
+ attach_type != BPF_TRACE_UPROBE_SESSION)
return -EINVAL;
return 0;
case BPF_PROG_TYPE_SCHED_CLS:
@@ -5239,7 +5243,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
ret = bpf_kprobe_multi_link_attach(attr, prog);
- else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
+ else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI ||
+ attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION)
ret = bpf_uprobe_multi_link_attach(attr, prog);
break;
default:
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fdab7ecd8dfa..98e940ec184d 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1557,6 +1557,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog)
return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
}
+static inline bool is_uprobe_multi(const struct bpf_prog *prog)
+{
+ return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
+ prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
+static inline bool is_uprobe_session(const struct bpf_prog *prog)
+{
+ return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1574,13 +1585,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_func_ip:
if (is_kprobe_multi(prog))
return &bpf_get_func_ip_proto_kprobe_multi;
- if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ if (is_uprobe_multi(prog))
return &bpf_get_func_ip_proto_uprobe_multi;
return &bpf_get_func_ip_proto_kprobe;
case BPF_FUNC_get_attach_cookie:
if (is_kprobe_multi(prog))
return &bpf_get_attach_cookie_proto_kmulti;
- if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+ if (is_uprobe_multi(prog))
return &bpf_get_attach_cookie_proto_umulti;
return &bpf_get_attach_cookie_proto_trace;
default:
@@ -3074,6 +3085,7 @@ struct bpf_uprobe {
u64 cookie;
struct uprobe *uprobe;
struct uprobe_consumer consumer;
+ bool session;
};
struct bpf_uprobe_multi_link {
@@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
__u64 *data)
{
struct bpf_uprobe *uprobe;
+ int ret;
uprobe = container_of(con, struct bpf_uprobe, consumer);
- return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+ ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+ if (uprobe->session)
+ return ret ? UPROBE_HANDLER_IGNORE : 0;
+ return ret;
}
static int
@@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
struct bpf_uprobe *uprobe;
uprobe = container_of(con, struct bpf_uprobe, consumer);
+ /*
+ * There's chance we could get called with NULL data if we registered uprobe
+ * after it hit entry but before it hit return probe, just ignore it.
+ */
+ if (uprobe->session && !data)
+ return 0;
return uprobe_prog_run(uprobe, func, regs);
}
@@ -3299,7 +3321,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (sizeof(u64) != sizeof(void *))
return -EOPNOTSUPP;
- if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+ if (!is_uprobe_multi(prog))
return -EINVAL;
flags = attr->link_create.uprobe_multi.flags;
@@ -3375,11 +3397,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
uprobes[i].link = link;
- if (flags & BPF_F_UPROBE_MULTI_RETURN)
- uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
- else
+ if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
uprobes[i].consumer.handler = uprobe_multi_link_handler;
-
+ if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
+ uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+ if (is_uprobe_session(prog))
+ uprobes[i].session = true;
if (pid)
uprobes[i].consumer.filter = uprobe_multi_link_filter;
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7610883c8191..09bdb1867d4a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1116,6 +1116,7 @@ enum bpf_attach_type {
BPF_NETKIT_PRIMARY,
BPF_NETKIT_PEER,
BPF_TRACE_KPROBE_SESSION,
+ BPF_TRACE_UPROBE_SESSION,
__MAX_BPF_ATTACH_TYPE
};
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 712b95e8891b..3587ed7ec359 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -133,6 +133,7 @@ static const char * const attach_type_name[] = {
[BPF_NETKIT_PRIMARY] = "netkit_primary",
[BPF_NETKIT_PEER] = "netkit_peer",
[BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
+ [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session",
};
static const char * const link_type_name[] = {
--
2.46.1
On Sun, Sep 29, 2024 at 1:58 PM Jiri Olsa <jolsa@kernel.org> wrote: > > Adding support to attach BPF program for entry and return probe > of the same function. This is common use case which at the moment > requires to create two uprobe multi links. > > Adding new BPF_TRACE_UPROBE_SESSION attach type that instructs > kernel to attach single link program to both entry and exit probe. > > It's possible to control execution of the BPF program on return > probe simply by returning zero or non zero from the entry BPF > program execution to execute or not the BPF program on return > probe respectively. > > Acked-by: Andrii Nakryiko <andrii@kernel.org> > Signed-off-by: Jiri Olsa <jolsa@kernel.org> > --- > include/uapi/linux/bpf.h | 1 + > kernel/bpf/syscall.c | 9 ++++++-- > kernel/trace/bpf_trace.c | 39 +++++++++++++++++++++++++++------- > tools/include/uapi/linux/bpf.h | 1 + > tools/lib/bpf/libbpf.c | 1 + > 5 files changed, 41 insertions(+), 10 deletions(-) > > diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h > index 8ab4d8184b9d..77d0bc5fa986 100644 > --- a/include/uapi/linux/bpf.h > +++ b/include/uapi/linux/bpf.h > @@ -1116,6 +1116,7 @@ enum bpf_attach_type { > BPF_NETKIT_PRIMARY, > BPF_NETKIT_PEER, > BPF_TRACE_KPROBE_SESSION, > + BPF_TRACE_UPROBE_SESSION, > __MAX_BPF_ATTACH_TYPE > }; > > diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c > index a8f1808a1ca5..0cf7617e6cb6 100644 > --- a/kernel/bpf/syscall.c > +++ b/kernel/bpf/syscall.c > @@ -3983,10 +3983,14 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, > if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI && > attach_type != BPF_TRACE_UPROBE_MULTI) > return -EINVAL; > + if (prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION && > + attach_type != BPF_TRACE_UPROBE_SESSION) > + return -EINVAL; > if (attach_type != BPF_PERF_EVENT && > attach_type != BPF_TRACE_KPROBE_MULTI && > attach_type != BPF_TRACE_KPROBE_SESSION && > - attach_type != BPF_TRACE_UPROBE_MULTI) 
> + attach_type != BPF_TRACE_UPROBE_MULTI && > + attach_type != BPF_TRACE_UPROBE_SESSION) > return -EINVAL; > return 0; > case BPF_PROG_TYPE_SCHED_CLS: > @@ -5239,7 +5243,8 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) > else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI || > attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION) > ret = bpf_kprobe_multi_link_attach(attr, prog); > - else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI) > + else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI || > + attr->link_create.attach_type == BPF_TRACE_UPROBE_SESSION) > ret = bpf_uprobe_multi_link_attach(attr, prog); > break; > default: > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c > index fdab7ecd8dfa..98e940ec184d 100644 > --- a/kernel/trace/bpf_trace.c > +++ b/kernel/trace/bpf_trace.c > @@ -1557,6 +1557,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog) > return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION; > } > > +static inline bool is_uprobe_multi(const struct bpf_prog *prog) > +{ > + return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI || > + prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; > +} > + > +static inline bool is_uprobe_session(const struct bpf_prog *prog) > +{ > + return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION; > +} > + > static const struct bpf_func_proto * > kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) > { > @@ -1574,13 +1585,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) > case BPF_FUNC_get_func_ip: > if (is_kprobe_multi(prog)) > return &bpf_get_func_ip_proto_kprobe_multi; > - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) > + if (is_uprobe_multi(prog)) > return &bpf_get_func_ip_proto_uprobe_multi; > return &bpf_get_func_ip_proto_kprobe; > case BPF_FUNC_get_attach_cookie: > if (is_kprobe_multi(prog)) > return 
&bpf_get_attach_cookie_proto_kmulti; > - if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI) > + if (is_uprobe_multi(prog)) > return &bpf_get_attach_cookie_proto_umulti; > return &bpf_get_attach_cookie_proto_trace; > default: > @@ -3074,6 +3085,7 @@ struct bpf_uprobe { > u64 cookie; > struct uprobe *uprobe; > struct uprobe_consumer consumer; > + bool session; > }; > > struct bpf_uprobe_multi_link { > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, > __u64 *data) > { > struct bpf_uprobe *uprobe; > + int ret; > > uprobe = container_of(con, struct bpf_uprobe, consumer); > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > + if (uprobe->session) > + return ret ? UPROBE_HANDLER_IGNORE : 0; > + return ret; isn't this a bug that BPF program can return arbitrary value here and, e.g., request uprobe unregistration? Let's return 0, unless uprobe->session? (it would be good to move that into a separate patch with Fixes) > } > > static int > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s > struct bpf_uprobe *uprobe; > > uprobe = container_of(con, struct bpf_uprobe, consumer); > + /* > + * There's chance we could get called with NULL data if we registered uprobe > + * after it hit entry but before it hit return probe, just ignore it. > + */ > + if (uprobe->session && !data) > + return 0; why can't handle_uretprobe_chain() do this check instead? We know when we are dealing with session uprobe/uretprobe, so we can filter out these spurious calls, no? 
> return uprobe_prog_run(uprobe, func, regs); > } > > @@ -3299,7 +3321,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr > if (sizeof(u64) != sizeof(void *)) > return -EOPNOTSUPP; > > - if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI) > + if (!is_uprobe_multi(prog)) > return -EINVAL; > > flags = attr->link_create.uprobe_multi.flags; > @@ -3375,11 +3397,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr > > uprobes[i].link = link; > > - if (flags & BPF_F_UPROBE_MULTI_RETURN) > - uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; > - else > + if (!(flags & BPF_F_UPROBE_MULTI_RETURN)) > uprobes[i].consumer.handler = uprobe_multi_link_handler; > - > + if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog)) > + uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler; > + if (is_uprobe_session(prog)) > + uprobes[i].session = true; > if (pid) > uprobes[i].consumer.filter = uprobe_multi_link_filter; > } > diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h > index 7610883c8191..09bdb1867d4a 100644 > --- a/tools/include/uapi/linux/bpf.h > +++ b/tools/include/uapi/linux/bpf.h > @@ -1116,6 +1116,7 @@ enum bpf_attach_type { > BPF_NETKIT_PRIMARY, > BPF_NETKIT_PEER, > BPF_TRACE_KPROBE_SESSION, > + BPF_TRACE_UPROBE_SESSION, > __MAX_BPF_ATTACH_TYPE > }; > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c > index 712b95e8891b..3587ed7ec359 100644 > --- a/tools/lib/bpf/libbpf.c > +++ b/tools/lib/bpf/libbpf.c > @@ -133,6 +133,7 @@ static const char * const attach_type_name[] = { > [BPF_NETKIT_PRIMARY] = "netkit_primary", > [BPF_NETKIT_PEER] = "netkit_peer", > [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session", > + [BPF_TRACE_UPROBE_SESSION] = "trace_uprobe_session", > }; > > static const char * const link_type_name[] = { > -- > 2.46.1 >
On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote: SNIP > > struct bpf_uprobe_multi_link { > > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, > > __u64 *data) > > { > > struct bpf_uprobe *uprobe; > > + int ret; > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > + if (uprobe->session) > > + return ret ? UPROBE_HANDLER_IGNORE : 0; > > + return ret; > > isn't this a bug that BPF program can return arbitrary value here and, > e.g., request uprobe unregistration? > > Let's return 0, unless uprobe->session? (it would be good to move that > into a separate patch with Fixes) yea there's no use case for uprobe multi user, so let's return 0 as you suggest > > > } > > > > static int > > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s > > struct bpf_uprobe *uprobe; > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > + /* > > + * There's chance we could get called with NULL data if we registered uprobe > > + * after it hit entry but before it hit return probe, just ignore it. > > + */ > > + if (uprobe->session && !data) > > + return 0; > > why can't handle_uretprobe_chain() do this check instead? We know when > we are dealing with session uprobe/uretprobe, so we can filter out > these spurious calls, no? right, now that we decide session based on presence of both callbacks we have that info in here handle_uretprobe_chain.. but let's still check it for sanity and warn? like if (WARN_ON_ONCE(uprobe->session && !data)) return 0; jirka
On Tue, Oct 1, 2024 at 6:17 AM Jiri Olsa <olsajiri@gmail.com> wrote: > > On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote: > > SNIP > > > > struct bpf_uprobe_multi_link { > > > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, > > > __u64 *data) > > > { > > > struct bpf_uprobe *uprobe; > > > + int ret; > > > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > > + if (uprobe->session) > > > + return ret ? UPROBE_HANDLER_IGNORE : 0; > > > + return ret; > > > > isn't this a bug that BPF program can return arbitrary value here and, > > e.g., request uprobe unregistration? > > > > Let's return 0, unless uprobe->session? (it would be good to move that > > into a separate patch with Fixes) > > yea there's no use case for uprobe multi user, so let's return > 0 as you suggest > > > > > > } > > > > > > static int > > > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s > > > struct bpf_uprobe *uprobe; > > > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > > + /* > > > + * There's chance we could get called with NULL data if we registered uprobe > > > + * after it hit entry but before it hit return probe, just ignore it. > > > + */ > > > + if (uprobe->session && !data) > > > + return 0; > > > > why can't handle_uretprobe_chain() do this check instead? We know when > > we are dealing with session uprobe/uretprobe, so we can filter out > > these spurious calls, no? > > right, now that we decide session based on presence of both callbacks > we have that info in here handle_uretprobe_chain.. but let's still check > it for sanity and warn? 
like > > if (WARN_ON_ONCE(uprobe->session && !data)) You mean to check this *additionally* in uprobe_multi_link_handler(), after core uprobe code already filtered that condition out? It won't hurt, but I'm not sure I see the point? > return 0; > > jirka
On Tue, Oct 01, 2024 at 10:11:13AM -0700, Andrii Nakryiko wrote: > On Tue, Oct 1, 2024 at 6:17 AM Jiri Olsa <olsajiri@gmail.com> wrote: > > > > On Mon, Sep 30, 2024 at 02:36:08PM -0700, Andrii Nakryiko wrote: > > > > SNIP > > > > > > struct bpf_uprobe_multi_link { > > > > @@ -3248,9 +3260,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, > > > > __u64 *data) > > > > { > > > > struct bpf_uprobe *uprobe; > > > > + int ret; > > > > > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > > > - return uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > > > + ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs); > > > > + if (uprobe->session) > > > > + return ret ? UPROBE_HANDLER_IGNORE : 0; > > > > + return ret; > > > > > > isn't this a bug that BPF program can return arbitrary value here and, > > > e.g., request uprobe unregistration? > > > > > > Let's return 0, unless uprobe->session? (it would be good to move that > > > into a separate patch with Fixes) > > > > yea there's no use case for uprobe multi user, so let's return > > 0 as you suggest > > > > > > > > > } > > > > > > > > static int > > > > @@ -3260,6 +3276,12 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s > > > > struct bpf_uprobe *uprobe; > > > > > > > > uprobe = container_of(con, struct bpf_uprobe, consumer); > > > > + /* > > > > + * There's chance we could get called with NULL data if we registered uprobe > > > > + * after it hit entry but before it hit return probe, just ignore it. > > > > + */ > > > > + if (uprobe->session && !data) > > > > + return 0; > > > > > > why can't handle_uretprobe_chain() do this check instead? We know when > > > we are dealing with session uprobe/uretprobe, so we can filter out > > > these spurious calls, no? > > > > right, now that we decide session based on presence of both callbacks > > we have that info in here handle_uretprobe_chain.. 
but let's still check > > it for sanity and warn? like > > > > if (WARN_ON_ONCE(uprobe->session && !data)) > > You mean to check this *additionally* in uprobe_multi_link_handler(), > after core uprobe code already filtered that condition out? It won't > hurt, but I'm not sure I see the point? yes, it's cross subsytem call so just to be on safe side for future, but I don't insist jirka
© 2016 - 2024 Red Hat, Inc.