From: Donglin Peng <pengdonglin@xiaomi.com>
This patch introduces a binary search optimization for BTF type lookups
when the BTF instance contains sorted types.
The optimization significantly improves performance when searching for
types in large BTF instances with sorted types. For unsorted BTF, the
implementation falls back to the original linear search.
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Ihor Solodrai <ihor.solodrai@linux.dev>
Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
---
tools/lib/bpf/btf.c | 90 +++++++++++++++++++++++++++++++++------------
1 file changed, 66 insertions(+), 24 deletions(-)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index bf75f770d29a..02407a022afb 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -92,6 +92,8 @@ struct btf {
* - for split BTF counts number of types added on top of base BTF.
*/
__u32 nr_types;
+ /* the start ID of named types in sorted BTF */
+ int named_start_id;
/* if not NULL, points to the base BTF on top of which the current
* split BTF is based
*/
@@ -897,46 +899,83 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
return type_id;
}
-__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
+static __s32 btf_find_type_by_name_bsearch(const struct btf *btf, const char *name,
+ __s32 start_id)
{
- __u32 i, nr_types = btf__type_cnt(btf);
-
- if (!strcmp(type_name, "void"))
- return 0;
-
- for (i = 1; i < nr_types; i++) {
- const struct btf_type *t = btf__type_by_id(btf, i);
- const char *name = btf__name_by_offset(btf, t->name_off);
-
- if (name && !strcmp(type_name, name))
- return i;
+ const struct btf_type *t;
+ const char *tname;
+ __s32 l, r, m;
+
+ l = start_id;
+ r = btf__type_cnt(btf) - 1;
+ while (l <= r) {
+ m = l + (r - l) / 2;
+ t = btf_type_by_id(btf, m);
+ tname = btf__str_by_offset(btf, t->name_off);
+ if (strcmp(tname, name) >= 0) {
+ if (l == r)
+ return r;
+ r = m;
+ } else {
+ l = m + 1;
+ }
}
- return libbpf_err(-ENOENT);
+ return btf__type_cnt(btf);
}
static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
- const char *type_name, __u32 kind)
+ const char *type_name, __s32 kind)
{
- __u32 i, nr_types = btf__type_cnt(btf);
+ const struct btf_type *t;
+ const char *tname;
+ __s32 idx;
+
+ if (start_id < btf->start_id) {
+ idx = btf_find_by_name_kind(btf->base_btf, start_id,
+ type_name, kind);
+ if (idx >= 0)
+ return idx;
+ start_id = btf->start_id;
+ }
- if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
+ if (kind == BTF_KIND_UNKN || strcmp(type_name, "void") == 0)
return 0;
- for (i = start_id; i < nr_types; i++) {
- const struct btf_type *t = btf__type_by_id(btf, i);
- const char *name;
+ if (btf->named_start_id > 0 && type_name[0]) {
+ start_id = max(start_id, btf->named_start_id);
+ idx = btf_find_type_by_name_bsearch(btf, type_name, start_id);
+ for (; idx < btf__type_cnt(btf); idx++) {
+ t = btf__type_by_id(btf, idx);
+ tname = btf__str_by_offset(btf, t->name_off);
+ if (strcmp(tname, type_name) != 0)
+ return libbpf_err(-ENOENT);
+ if (kind < 0 || btf_kind(t) == kind)
+ return idx;
+ }
+ } else {
+ __u32 i, total;
- if (btf_kind(t) != kind)
- continue;
- name = btf__name_by_offset(btf, t->name_off);
- if (name && !strcmp(type_name, name))
- return i;
+ total = btf__type_cnt(btf);
+ for (i = start_id; i < total; i++) {
+ t = btf_type_by_id(btf, i);
+ if (kind > 0 && btf_kind(t) != kind)
+ continue;
+ tname = btf__str_by_offset(btf, t->name_off);
+ if (strcmp(tname, type_name) == 0)
+ return i;
+ }
}
return libbpf_err(-ENOENT);
}
+/* the kind value of -1 indicates that kind matching should be skipped */
+__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
+{
+ return btf_find_by_name_kind(btf, 1, type_name, -1);
+}
+
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
__u32 kind)
{
@@ -1006,6 +1045,7 @@ static struct btf *btf_new_empty(struct btf *base_btf)
btf->fd = -1;
btf->ptr_sz = sizeof(void *);
btf->swapped_endian = false;
+ btf->named_start_id = 0;
if (base_btf) {
btf->base_btf = base_btf;
@@ -1057,6 +1097,7 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, b
btf->start_id = 1;
btf->start_str_off = 0;
btf->fd = -1;
+ btf->named_start_id = 0;
if (base_btf) {
btf->base_btf = base_btf;
@@ -1715,6 +1756,7 @@ static void btf_invalidate_raw_data(struct btf *btf)
free(btf->raw_data_swapped);
btf->raw_data_swapped = NULL;
}
+ btf->named_start_id = 0;
}
/* Ensure BTF is ready to be modified (by splitting into a three memory
--
2.34.1
On Fri, Jan 9, 2026 at 5:00 AM Donglin Peng <dolinux.peng@gmail.com> wrote:
>
> From: Donglin Peng <pengdonglin@xiaomi.com>
>
> This patch introduces binary search optimization for BTF type lookups
> when the BTF instance contains sorted types.
>
> The optimization significantly improves performance when searching for
> types in large BTF instances with sorted types. For unsorted BTF, the
> implementation falls back to the original linear search.
>
> Cc: Eduard Zingerman <eddyz87@gmail.com>
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
> Cc: Alan Maguire <alan.maguire@oracle.com>
> Cc: Ihor Solodrai <ihor.solodrai@linux.dev>
> Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
> Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
> ---
> tools/lib/bpf/btf.c | 90 +++++++++++++++++++++++++++++++++------------
> 1 file changed, 66 insertions(+), 24 deletions(-)
>
[...]
> static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
> - const char *type_name, __u32 kind)
> + const char *type_name, __s32 kind)
> {
> - __u32 i, nr_types = btf__type_cnt(btf);
> + const struct btf_type *t;
> + const char *tname;
> + __s32 idx;
> +
> + if (start_id < btf->start_id) {
> + idx = btf_find_by_name_kind(btf->base_btf, start_id,
> + type_name, kind);
> + if (idx >= 0)
> + return idx;
> + start_id = btf->start_id;
> + }
>
> - if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
> + if (kind == BTF_KIND_UNKN || strcmp(type_name, "void") == 0)
> return 0;
>
> - for (i = start_id; i < nr_types; i++) {
> - const struct btf_type *t = btf__type_by_id(btf, i);
> - const char *name;
> + if (btf->named_start_id > 0 && type_name[0]) {
> + start_id = max(start_id, btf->named_start_id);
> + idx = btf_find_type_by_name_bsearch(btf, type_name, start_id);
> + for (; idx < btf__type_cnt(btf); idx++) {
I hope the compiler will optimize out btf__type_cnt() and won't be
recalculating it all the time, but I'd absolutely make sure by keeping
nr_types local variable which you deleted for some reason. Please
include in your follow up.
> + t = btf__type_by_id(btf, idx);
> + tname = btf__str_by_offset(btf, t->name_off);
> + if (strcmp(tname, type_name) != 0)
> + return libbpf_err(-ENOENT);
> + if (kind < 0 || btf_kind(t) == kind)
> + return idx;
> + }
> + } else {
> + __u32 i, total;
>
> - if (btf_kind(t) != kind)
> - continue;
> - name = btf__name_by_offset(btf, t->name_off);
> - if (name && !strcmp(type_name, name))
> - return i;
> + total = btf__type_cnt(btf);
and here you have a local total pre-calculated. Just move it outside
of this if/else and use in both branches
(I adjusted this trivially while applying, also unified idx,i -> id)
> + for (i = start_id; i < total; i++) {
> + t = btf_type_by_id(btf, i);
> + if (kind > 0 && btf_kind(t) != kind)
> + continue;
> + tname = btf__str_by_offset(btf, t->name_off);
> + if (strcmp(tname, type_name) == 0)
> + return i;
> + }
> }
>
[...]
On Wed, Jan 14, 2026 at 8:29 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Fri, Jan 9, 2026 at 5:00 AM Donglin Peng <dolinux.peng@gmail.com> wrote:
> >
> > From: Donglin Peng <pengdonglin@xiaomi.com>
> >
> > This patch introduces binary search optimization for BTF type lookups
> > when the BTF instance contains sorted types.
> >
> > The optimization significantly improves performance when searching for
> > types in large BTF instances with sorted types. For unsorted BTF, the
> > implementation falls back to the original linear search.
> >
> > Cc: Eduard Zingerman <eddyz87@gmail.com>
> > Cc: Alexei Starovoitov <ast@kernel.org>
> > Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
> > Cc: Alan Maguire <alan.maguire@oracle.com>
> > Cc: Ihor Solodrai <ihor.solodrai@linux.dev>
> > Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
> > Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
> > ---
> > tools/lib/bpf/btf.c | 90 +++++++++++++++++++++++++++++++++------------
> > 1 file changed, 66 insertions(+), 24 deletions(-)
> >
>
> [...]
>
> > static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
> > - const char *type_name, __u32 kind)
> > + const char *type_name, __s32 kind)
> > {
> > - __u32 i, nr_types = btf__type_cnt(btf);
> > + const struct btf_type *t;
> > + const char *tname;
> > + __s32 idx;
> > +
> > + if (start_id < btf->start_id) {
> > + idx = btf_find_by_name_kind(btf->base_btf, start_id,
> > + type_name, kind);
> > + if (idx >= 0)
> > + return idx;
> > + start_id = btf->start_id;
> > + }
> >
> > - if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
> > + if (kind == BTF_KIND_UNKN || strcmp(type_name, "void") == 0)
> > return 0;
> >
> > - for (i = start_id; i < nr_types; i++) {
> > - const struct btf_type *t = btf__type_by_id(btf, i);
> > - const char *name;
> > + if (btf->named_start_id > 0 && type_name[0]) {
> > + start_id = max(start_id, btf->named_start_id);
> > + idx = btf_find_type_by_name_bsearch(btf, type_name, start_id);
> > + for (; idx < btf__type_cnt(btf); idx++) {
>
> I hope the compiler will optimize out btf__type_cnt() and won't be
> recalculating it all the time, but I'd absolutely make sure by keeping
> nr_types local variable which you deleted for some reason. Please
> include in your follow up.
Thanks, I will optimize it.
>
> > + t = btf__type_by_id(btf, idx);
> > + tname = btf__str_by_offset(btf, t->name_off);
> > + if (strcmp(tname, type_name) != 0)
> > + return libbpf_err(-ENOENT);
> > + if (kind < 0 || btf_kind(t) == kind)
> > + return idx;
> > + }
> > + } else {
> > + __u32 i, total;
> >
> > - if (btf_kind(t) != kind)
> > - continue;
> > - name = btf__name_by_offset(btf, t->name_off);
> > - if (name && !strcmp(type_name, name))
> > - return i;
> > + total = btf__type_cnt(btf);
>
> and here you have a local total pre-calculated. Just move it outside
> of this if/else and use in both branches
Thanks, I will fix it.
>
> (I adjusted this trivially while applying, also unified idx,i -> id
Thanks, I will fix it in v13.
>
>
> > + for (i = start_id; i < total; i++) {
> > + t = btf_type_by_id(btf, i);
> > + if (kind > 0 && btf_kind(t) != kind)
> > + continue;
> > + tname = btf__str_by_offset(btf, t->name_off);
> > + if (strcmp(tname, type_name) == 0)
> > + return i;
> > + }
> > }
> >
>
> [...]
On Wed, Jan 14, 2026 at 9:49 AM Donglin Peng <dolinux.peng@gmail.com> wrote:
>
> On Wed, Jan 14, 2026 at 8:29 AM Andrii Nakryiko
> <andrii.nakryiko@gmail.com> wrote:
> >
> > On Fri, Jan 9, 2026 at 5:00 AM Donglin Peng <dolinux.peng@gmail.com> wrote:
> > >
> > > From: Donglin Peng <pengdonglin@xiaomi.com>
> > >
> > > This patch introduces binary search optimization for BTF type lookups
> > > when the BTF instance contains sorted types.
> > >
> > > The optimization significantly improves performance when searching for
> > > types in large BTF instances with sorted types. For unsorted BTF, the
> > > implementation falls back to the original linear search.
> > >
> > > Cc: Eduard Zingerman <eddyz87@gmail.com>
> > > Cc: Alexei Starovoitov <ast@kernel.org>
> > > Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
> > > Cc: Alan Maguire <alan.maguire@oracle.com>
> > > Cc: Ihor Solodrai <ihor.solodrai@linux.dev>
> > > Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
> > > Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
> > > ---
> > > tools/lib/bpf/btf.c | 90 +++++++++++++++++++++++++++++++++------------
> > > 1 file changed, 66 insertions(+), 24 deletions(-)
> > >
> >
> > [...]
> >
> > > static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
> > > - const char *type_name, __u32 kind)
> > > + const char *type_name, __s32 kind)
> > > {
> > > - __u32 i, nr_types = btf__type_cnt(btf);
> > > + const struct btf_type *t;
> > > + const char *tname;
> > > + __s32 idx;
> > > +
> > > + if (start_id < btf->start_id) {
> > > + idx = btf_find_by_name_kind(btf->base_btf, start_id,
> > > + type_name, kind);
> > > + if (idx >= 0)
> > > + return idx;
> > > + start_id = btf->start_id;
> > > + }
> > >
> > > - if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
> > > + if (kind == BTF_KIND_UNKN || strcmp(type_name, "void") == 0)
> > > return 0;
> > >
> > > - for (i = start_id; i < nr_types; i++) {
> > > - const struct btf_type *t = btf__type_by_id(btf, i);
> > > - const char *name;
> > > + if (btf->named_start_id > 0 && type_name[0]) {
> > > + start_id = max(start_id, btf->named_start_id);
> > > + idx = btf_find_type_by_name_bsearch(btf, type_name, start_id);
> > > + for (; idx < btf__type_cnt(btf); idx++) {
> >
> > I hope the compiler will optimize out btf__type_cnt() and won't be
> > recalculating it all the time, but I'd absolutely make sure by keeping
> > nr_types local variable which you deleted for some reason. Please
> > include in your follow up.
>
> Thanks, I will optimize it.
>
> >
> > > + t = btf__type_by_id(btf, idx);
> > > + tname = btf__str_by_offset(btf, t->name_off);
> > > + if (strcmp(tname, type_name) != 0)
> > > + return libbpf_err(-ENOENT);
> > > + if (kind < 0 || btf_kind(t) == kind)
> > > + return idx;
> > > + }
> > > + } else {
> > > + __u32 i, total;
> > >
> > > - if (btf_kind(t) != kind)
> > > - continue;
> > > - name = btf__name_by_offset(btf, t->name_off);
> > > - if (name && !strcmp(type_name, name))
> > > - return i;
> > > + total = btf__type_cnt(btf);
> >
> > and here you have a local total pre-calculated. Just move it outside
> > of this if/else and use in both branches
>
> Thanks, I will fix it.
>
> >
> > (I adjusted this trivially while applying, also unified idx,i -> id
Thank you for fixing it.
>
> Thanks, I will fix it in v13.
>
> >
> >
> > > + for (i = start_id; i < total; i++) {
> > > + t = btf_type_by_id(btf, i);
> > > + if (kind > 0 && btf_kind(t) != kind)
> > > + continue;
> > > + tname = btf__str_by_offset(btf, t->name_off);
> > > + if (strcmp(tname, type_name) == 0)
> > > + return i;
> > > + }
> > > }
> > >
> >
> > [...]
© 2016 - 2026 Red Hat, Inc.