From: Xu Kuohai <xukuohai@huawei.com>
When BTI is enabled, the indirect jump selftest triggers BTI exception:
Internal error: Oops - BTI: 0000000036000003 [#1] SMP
...
Call trace:
bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
bpf_prog_run_pin_on_cpu+0x140/0x464
bpf_prog_test_run_syscall+0x274/0x3ac
bpf_prog_test_run+0x224/0x2b0
__sys_bpf+0x4cc/0x5c8
__arm64_sys_bpf+0x7c/0x94
invoke_syscall+0x78/0x20c
el0_svc_common+0x11c/0x1c0
do_el0_svc+0x48/0x58
el0_svc+0x54/0x19c
el0t_64_sync_handler+0x84/0x12c
el0t_64_sync+0x198/0x19c
This happens because no BTI instruction is generated by the JIT for
indirect jump targets.
Fix it by emitting a BTI instruction for every possible indirect jump
target when BTI is enabled. The targets are identified by traversing
all instruction arrays of jump table type used by the BPF program,
since indirect jump targets can only be read from instruction arrays
of jump table type.
Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
---
v3:
- Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
- Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
---
arch/arm64/net/bpf_jit_comp.c | 20 +++++++++++
include/linux/bpf.h | 14 ++++++++
kernel/bpf/bpf_insn_array.c | 63 +++++++++++++++++++++++++++++++++++
kernel/bpf/verifier.c | 6 ++++
4 files changed, 103 insertions(+)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 0c4d44bcfbf4..f08f0f9fa04e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -78,6 +78,7 @@ static const int bpf2a64[] = {
struct jit_ctx {
const struct bpf_prog *prog;
+ unsigned long *indirect_targets;
int idx;
int epilogue_offset;
int *offset;
@@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
}
+static bool is_indirect_target(int insn_off, unsigned long *targets_bitmap)
+{
+ return targets_bitmap && test_bit(insn_off, targets_bitmap);
+}
+
/* JITs an eBPF instruction.
* Returns:
* 0 - successfully JITed an 8-byte eBPF instruction.
@@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
int ret;
bool sign_extend;
+ if (is_indirect_target(i, ctx->indirect_targets))
+ emit_bti(A64_BTI_J, ctx);
+
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_jump_table(prog)) {
+ ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (ctx.indirect_targets == NULL) {
+ prog = orig_prog;
+ goto out_off;
+ }
+ bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
+ }
+
ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
@@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->aux->priv_stack_ptr = NULL;
}
kvfree(ctx.offset);
+ kvfree(ctx.indirect_targets);
out_priv_stack:
kfree(jit_data);
prog->aux->jit_data = NULL;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4e7d72dfbcd4..4a26346263bf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3893,11 +3893,25 @@ void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
#ifdef CONFIG_BPF_SYSCALL
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
+void bpf_prog_mark_jump_table(struct bpf_map *map);
+bool bpf_prog_has_jump_table(const struct bpf_prog *prog);
#else
static inline void
bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
}
+static inline void
+bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+}
+static inline void bpf_prog_mark_jump_table(struct bpf_map *map)
+{
+}
+static inline bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
index c96630cb75bf..b9b43fdbe8e3 100644
--- a/kernel/bpf/bpf_insn_array.c
+++ b/kernel/bpf/bpf_insn_array.c
@@ -6,6 +6,7 @@
struct bpf_insn_array {
struct bpf_map map;
atomic_t used;
+ bool is_jump_table;
long *ips;
DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
};
@@ -302,3 +303,65 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
}
}
}
+
+void bpf_prog_mark_jump_table(struct bpf_map *map)
+{
+ struct bpf_insn_array *insn_array = cast_insn_array(map);
+
+ insn_array->is_jump_table = true;
+}
+
+static bool is_jump_table(const struct bpf_map *map)
+{
+ struct bpf_insn_array *insn_array;
+
+ if (!is_insn_array(map))
+ return false;
+
+ insn_array = cast_insn_array(map);
+ return insn_array->is_jump_table;
+}
+
+bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
+{
+ int i;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++) {
+ if (is_jump_table(prog->aux->used_maps[i]))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * This function collects possible indirect jump targets in a BPF program. Since indirect jump
+ * targets can only be read from instruction arrays used as jump tables, it traverses all jump
+ * tables used by @prog. For each instruction found in the jump tables, it sets the corresponding
+ * bit in @bitmap.
+ */
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+ struct bpf_insn_array *insn_array;
+ struct bpf_map *map;
+ u32 xlated_off;
+ int i, j;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++) {
+ map = prog->aux->used_maps[i];
+ if (!is_jump_table(map))
+ continue;
+
+ insn_array = cast_insn_array(map);
+ for (j = 0; j < map->max_entries; j++) {
+ xlated_off = insn_array->values[j].xlated_off;
+ if (xlated_off == INSN_DELETED)
+ continue;
+ if (xlated_off < prog->aux->subprog_start)
+ continue;
+ xlated_off -= prog->aux->subprog_start;
+ if (xlated_off >= prog->len)
+ continue;
+ __set_bit(xlated_off, bitmap);
+ }
+ }
+}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2de1a736ef69..bc4a269ed06e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20292,6 +20292,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
return -EINVAL;
}
+ /*
+ * Explicitly mark this map as a jump table such that it can be
+ * distinguished later from other instruction arrays
+ */
+ bpf_prog_mark_jump_table(map);
+
for (i = 0; i < n - 1; i++) {
other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
env->insn_idx, env->cur_state->speculative);
--
2.47.3
On Sat, 2025-12-27 at 16:10 +0800, Xu Kuohai wrote:
> From: Xu Kuohai <xukuohai@huawei.com>
>
> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>
> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> ...
> Call trace:
> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> bpf_prog_run_pin_on_cpu+0x140/0x464
> bpf_prog_test_run_syscall+0x274/0x3ac
> bpf_prog_test_run+0x224/0x2b0
> __sys_bpf+0x4cc/0x5c8
> __arm64_sys_bpf+0x7c/0x94
> invoke_syscall+0x78/0x20c
> el0_svc_common+0x11c/0x1c0
> do_el0_svc+0x48/0x58
> el0_svc+0x54/0x19c
> el0t_64_sync_handler+0x84/0x12c
> el0t_64_sync+0x198/0x19c
>
> This happens because no BTI instruction is generated by the JIT for
> indirect jump targets.
>
> Fix it by emitting BTI instruction for every possible indirect jump
> targets when BTI is enabled. The targets are identified by traversing
> all instruction arrays of jump table type used by the BPF program,
> since indirect jump targets can only be read from instruction arrays
> of jump table type.
>
> Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> ---
> v3:
> - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
>
> v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
> - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
>
> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> ---
Hi Xu, Anton, Alexei,
Sorry, I'm a bit late to the discussion, ignored this patch-set
because of the "arm64" tag.
What you are fixing here for arm64 will be an issue for x86 with CFI
as well, right?
If that is the case, I think that we should fix this in a "generic"
way from the start. What do you think about the following:
- add a field 'bool indirect_jmp_target' to 'struct bpf_insn_aux_data'
- set this field to true for each jump target inspected by the
verifier.c:check_indirect_jump()
- use this field in the jit to decide whether to emit a BTI instruction.
Seems a bit simpler than what is discussed in this patch-set.
Wdyt?
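A rough sketch of that idea, just to illustrate the shape it could take
(the marking site, the plumbing into the JIT, and the helper name below
are assumptions, not existing code):

/* include/linux/bpf_verifier.h */
struct bpf_insn_aux_data {
        /* ... existing members ... */
        bool indirect_jmp_target;       /* insn is a possible gotox target */
};

/* kernel/bpf/verifier.c: in check_indirect_jump(), mark every target
 * taken from the jump table (target_insn_idx is a placeholder name):
 */
        env->insn_aux_data[target_insn_idx].indirect_jmp_target = true;

/* arch JIT: assuming the flag is somehow made visible to the JIT,
 * e.g. via prog->aux (insn_is_indirect_jmp_target() is hypothetical):
 */
        if (insn_is_indirect_jmp_target(ctx->prog, i))
                emit_bti(A64_BTI_J, ctx);       /* endbr64 on x86 with CFI */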
[...]
On Wed, Dec 31, 2025 at 11:35 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Sat, 2025-12-27 at 16:10 +0800, Xu Kuohai wrote:
> > From: Xu Kuohai <xukuohai@huawei.com>
> >
> > When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >
> > Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> > ...
> > Call trace:
> > bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> > bpf_prog_run_pin_on_cpu+0x140/0x464
> > bpf_prog_test_run_syscall+0x274/0x3ac
> > bpf_prog_test_run+0x224/0x2b0
> > __sys_bpf+0x4cc/0x5c8
> > __arm64_sys_bpf+0x7c/0x94
> > invoke_syscall+0x78/0x20c
> > el0_svc_common+0x11c/0x1c0
> > do_el0_svc+0x48/0x58
> > el0_svc+0x54/0x19c
> > el0t_64_sync_handler+0x84/0x12c
> > el0t_64_sync+0x198/0x19c
> >
> > This happens because no BTI instruction is generated by the JIT for
> > indirect jump targets.
> >
> > Fix it by emitting BTI instruction for every possible indirect jump
> > targets when BTI is enabled. The targets are identified by traversing
> > all instruction arrays of jump table type used by the BPF program,
> > since indirect jump targets can only be read from instruction arrays
> > of jump table type.
> >
> > Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> > Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> > ---
> > v3:
> > - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
> >
> > v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
> > - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
> >
> > v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> > ---
>
> Hi Xu, Anton, Alexei,
>
> Sorry, I'm a bit late to the discussion, ignored this patch-set
> because of the "arm64" tag.
>
> What you are fixing here for arm64 will be an issue for x86 with CFI
> as well, right?
>
> If that is the case, I think that we should fix this in a "generic"
> way from the start. What do you think about the following:
> - add a field 'bool indirect_jmp_target' to 'struct bpf_insn_aux_data'
yes, thanks, this looks better
> - set this field to true for each jump target inspected by the
> verifier.c:check_indirect_jump()
> - use this field in the jit to decide if to emit BTI instruction.
>
> Seems a bit simpler than what is discussed in this patch-set.
> Wdyt?
>
> [...]
>
On Wed, Dec 31, 2025 at 2:35 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Sat, 2025-12-27 at 16:10 +0800, Xu Kuohai wrote:
> > From: Xu Kuohai <xukuohai@huawei.com>
> >
> > When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >
> > Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> > ...
> > Call trace:
> > bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> > bpf_prog_run_pin_on_cpu+0x140/0x464
> > bpf_prog_test_run_syscall+0x274/0x3ac
> > bpf_prog_test_run+0x224/0x2b0
> > __sys_bpf+0x4cc/0x5c8
> > __arm64_sys_bpf+0x7c/0x94
> > invoke_syscall+0x78/0x20c
> > el0_svc_common+0x11c/0x1c0
> > do_el0_svc+0x48/0x58
> > el0_svc+0x54/0x19c
> > el0t_64_sync_handler+0x84/0x12c
> > el0t_64_sync+0x198/0x19c
> >
> > This happens because no BTI instruction is generated by the JIT for
> > indirect jump targets.
> >
> > Fix it by emitting BTI instruction for every possible indirect jump
> > targets when BTI is enabled. The targets are identified by traversing
> > all instruction arrays of jump table type used by the BPF program,
> > since indirect jump targets can only be read from instruction arrays
> > of jump table type.
> >
> > Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> > Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> > ---
> > v3:
> > - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
> >
> > v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
> > - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
> >
> > v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> > ---
>
> Hi Xu, Anton, Alexei,
>
> Sorry, I'm a bit late to the discussion, ignored this patch-set
> because of the "arm64" tag.
>
> What you are fixing here for arm64 will be an issue for x86 with CFI
> as well, right?
>
> If that is the case, I think that we should fix this in a "generic"
> way from the start. What do you think about the following:
> - add a field 'bool indirect_jmp_target' to 'struct bpf_insn_aux_data'
makes sense to me. u8 :1 pls.
> - set this field to true for each jump target inspected by the
> verifier.c:check_indirect_jump()
> - use this field in the jit to decide if to emit BTI instruction.
>
> Seems a bit simpler than what is discussed in this patch-set.
> Wdyt?
>
> [...]
On 1/1/2026 7:42 AM, Alexei Starovoitov wrote:
> On Wed, Dec 31, 2025 at 2:35 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>>
>> On Sat, 2025-12-27 at 16:10 +0800, Xu Kuohai wrote:
>>> From: Xu Kuohai <xukuohai@huawei.com>
>>>
>>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>>>
>>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
>>> ...
>>> Call trace:
>>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>>> bpf_prog_run_pin_on_cpu+0x140/0x464
>>> bpf_prog_test_run_syscall+0x274/0x3ac
>>> bpf_prog_test_run+0x224/0x2b0
>>> __sys_bpf+0x4cc/0x5c8
>>> __arm64_sys_bpf+0x7c/0x94
>>> invoke_syscall+0x78/0x20c
>>> el0_svc_common+0x11c/0x1c0
>>> do_el0_svc+0x48/0x58
>>> el0_svc+0x54/0x19c
>>> el0t_64_sync_handler+0x84/0x12c
>>> el0t_64_sync+0x198/0x19c
>>>
>>> This happens because no BTI instruction is generated by the JIT for
>>> indirect jump targets.
>>>
>>> Fix it by emitting BTI instruction for every possible indirect jump
>>> targets when BTI is enabled. The targets are identified by traversing
>>> all instruction arrays of jump table type used by the BPF program,
>>> since indirect jump targets can only be read from instruction arrays
>>> of jump table type.
>>>
>>> Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
>>> Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
>>> ---
>>> v3:
>>> - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
>>>
>>> v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
>>> - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
>>>
>>> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
>>> ---
>>
>> Hi Xu, Anton, Alexei,
>>
>> Sorry, I'm a bit late to the discussion, ignored this patch-set
>> because of the "arm64" tag.
>>
>> What you are fixing here for arm64 will be an issue for x86 with CFI
>> as well, right?
>>
>> If that is the case, I think that we should fix this in a "generic"
>> way from the start. What do you think about the following:
>> - add a field 'bool indirect_jmp_target' to 'struct bpf_insn_aux_data'
>
> makes sense to me. u8 :1 pls.
>
Got it, will do, thanks
>> - set this field to true for each jump target inspected by the
>> verifier.c:check_indirect_jump()
>> - use this field in the jit to decide if to emit BTI instruction.
>>
>> Seems a bit simpler than what is discussed in this patch-set.
>> Wdyt?
>>
>> [...]
On Wed, Dec 31, 2025 at 10:35 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Sat, 2025-12-27 at 16:10 +0800, Xu Kuohai wrote:
> > From: Xu Kuohai <xukuohai@huawei.com>
> >
> > When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >
> > Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> > ...
> > Call trace:
> > bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> > bpf_prog_run_pin_on_cpu+0x140/0x464
> > bpf_prog_test_run_syscall+0x274/0x3ac
> > bpf_prog_test_run+0x224/0x2b0
> > __sys_bpf+0x4cc/0x5c8
> > __arm64_sys_bpf+0x7c/0x94
> > invoke_syscall+0x78/0x20c
> > el0_svc_common+0x11c/0x1c0
> > do_el0_svc+0x48/0x58
> > el0_svc+0x54/0x19c
> > el0t_64_sync_handler+0x84/0x12c
> > el0t_64_sync+0x198/0x19c
> >
> > This happens because no BTI instruction is generated by the JIT for
> > indirect jump targets.
> >
> > Fix it by emitting BTI instruction for every possible indirect jump
> > targets when BTI is enabled. The targets are identified by traversing
> > all instruction arrays of jump table type used by the BPF program,
> > since indirect jump targets can only be read from instruction arrays
> > of jump table type.
> >
> > Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
> > Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
> > ---
> > v3:
> > - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov)
> >
> > v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com/
> > - Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
> >
> > v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> > ---
>
> Hi Xu, Anton, Alexei,
>
> Sorry, I'm a bit late to the discussion, ignored this patch-set
> because of the "arm64" tag.
>
> What you are fixing here for arm64 will be an issue for x86 with CFI
> as well, right?
Yes, I just realized this would be a problem for x86 as well, where
endbr64 would be needed in place of BTI.
>
> If that is the case, I think that we should fix this in a "generic"
> way from the start. What do you think about the following:
> - add a field 'bool indirect_jmp_target' to 'struct bpf_insn_aux_data'
> - set this field to true for each jump target inspected by the
> verifier.c:check_indirect_jump()
> - use this field in the jit to decide if to emit BTI instruction.
>
> Seems a bit simpler than what is discussed in this patch-set.
> Wdyt?
>
> [...]
On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>
> From: Xu Kuohai <xukuohai@huawei.com>
>
> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>
> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> ...
> Call trace:
> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> bpf_prog_run_pin_on_cpu+0x140/0x464
> bpf_prog_test_run_syscall+0x274/0x3ac
> bpf_prog_test_run+0x224/0x2b0
> __sys_bpf+0x4cc/0x5c8
> __arm64_sys_bpf+0x7c/0x94
> invoke_syscall+0x78/0x20c
> el0_svc_common+0x11c/0x1c0
> do_el0_svc+0x48/0x58
> el0_svc+0x54/0x19c
> el0t_64_sync_handler+0x84/0x12c
> el0t_64_sync+0x198/0x19c
>
> This happens because no BTI instruction is generated by the JIT for
> indirect jump targets.
>
> Fix it by emitting BTI instruction for every possible indirect jump
> targets when BTI is enabled. The targets are identified by traversing
> all instruction arrays of jump table type used by the BPF program,
> since indirect jump targets can only be read from instruction arrays
> of jump table type.

earlier you said:

> As Anton noted, even though jump tables are currently the only type
> of instruction array, users may still create insn_arrays that are not
> used as jump tables. In such cases, there is no need to emit BTIs.

yes, but it's not worth it to make this micro optimization in JIT.
If it's in insn_array just emit BTI unconditionally.
No need to do this filtering.

pw-bot: cr
On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
> On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>
>> From: Xu Kuohai <xukuohai@huawei.com>
>>
>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>>
>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
>> ...
>> Call trace:
>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>> bpf_prog_run_pin_on_cpu+0x140/0x464
>> bpf_prog_test_run_syscall+0x274/0x3ac
>> bpf_prog_test_run+0x224/0x2b0
>> __sys_bpf+0x4cc/0x5c8
>> __arm64_sys_bpf+0x7c/0x94
>> invoke_syscall+0x78/0x20c
>> el0_svc_common+0x11c/0x1c0
>> do_el0_svc+0x48/0x58
>> el0_svc+0x54/0x19c
>> el0t_64_sync_handler+0x84/0x12c
>> el0t_64_sync+0x198/0x19c
>>
>> This happens because no BTI instruction is generated by the JIT for
>> indirect jump targets.
>>
>> Fix it by emitting BTI instruction for every possible indirect jump
>> targets when BTI is enabled. The targets are identified by traversing
>> all instruction arrays of jump table type used by the BPF program,
>> since indirect jump targets can only be read from instruction arrays
>> of jump table type.
>
> earlier you said:
>
>> As Anton noted, even though jump tables are currently the only type
>> of instruction array, users may still create insn_arrays that are not
>> used as jump tables. In such cases, there is no need to emit BTIs.
>
> yes, but it's not worth it to make this micro optimization in JIT.
> If it's in insn_array just emit BTI unconditionally.
> No need to do this filtering.
>

Hmm, that is what the v1 version does. Please take a look. If it’s okay,
I’ll resend a rebased version.

v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/

> pw-bot: cr
On Tue, Dec 30, 2025 at 6:05 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>
> On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
> > On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>
> >> From: Xu Kuohai <xukuohai@huawei.com>
> >>
> >> When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >>
> >> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> >> ...
> >> Call trace:
> >> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> >> bpf_prog_run_pin_on_cpu+0x140/0x464
> >> bpf_prog_test_run_syscall+0x274/0x3ac
> >> bpf_prog_test_run+0x224/0x2b0
> >> __sys_bpf+0x4cc/0x5c8
> >> __arm64_sys_bpf+0x7c/0x94
> >> invoke_syscall+0x78/0x20c
> >> el0_svc_common+0x11c/0x1c0
> >> do_el0_svc+0x48/0x58
> >> el0_svc+0x54/0x19c
> >> el0t_64_sync_handler+0x84/0x12c
> >> el0t_64_sync+0x198/0x19c
> >>
> >> This happens because no BTI instruction is generated by the JIT for
> >> indirect jump targets.
> >>
> >> Fix it by emitting BTI instruction for every possible indirect jump
> >> targets when BTI is enabled. The targets are identified by traversing
> >> all instruction arrays of jump table type used by the BPF program,
> >> since indirect jump targets can only be read from instruction arrays
> >> of jump table type.
> >
> > earlier you said:
> >
> >> As Anton noted, even though jump tables are currently the only type
> >> of instruction array, users may still create insn_arrays that are not
> >> used as jump tables. In such cases, there is no need to emit BTIs.
> >
> > yes, but it's not worth it to make this micro optimization in JIT.
> > If it's in insn_array just emit BTI unconditionally.
> > No need to do this filtering.
> >
>
> Hmm, that is what the v1 version does. Please take a look. If it’s okay,
> I’ll resend a rebased version.
>
> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/

I don't think you need bitmap and bpf_prog_collect_indirect_targets().
Just look up each insn in the insn_array one at a time.
It's slower, but array is sorted, so binary search should work.
On 12/31/2025 10:16 AM, Alexei Starovoitov wrote:
> On Tue, Dec 30, 2025 at 6:05 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>
>> On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
>>> On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>>>
>>>> From: Xu Kuohai <xukuohai@huawei.com>
>>>>
>>>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>>>>
>>>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
>>>> ...
>>>> Call trace:
>>>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>>>> bpf_prog_run_pin_on_cpu+0x140/0x464
>>>> bpf_prog_test_run_syscall+0x274/0x3ac
>>>> bpf_prog_test_run+0x224/0x2b0
>>>> __sys_bpf+0x4cc/0x5c8
>>>> __arm64_sys_bpf+0x7c/0x94
>>>> invoke_syscall+0x78/0x20c
>>>> el0_svc_common+0x11c/0x1c0
>>>> do_el0_svc+0x48/0x58
>>>> el0_svc+0x54/0x19c
>>>> el0t_64_sync_handler+0x84/0x12c
>>>> el0t_64_sync+0x198/0x19c
>>>>
>>>> This happens because no BTI instruction is generated by the JIT for
>>>> indirect jump targets.
>>>>
>>>> Fix it by emitting BTI instruction for every possible indirect jump
>>>> targets when BTI is enabled. The targets are identified by traversing
>>>> all instruction arrays of jump table type used by the BPF program,
>>>> since indirect jump targets can only be read from instruction arrays
>>>> of jump table type.
>>>
>>> earlier you said:
>>>
>>>> As Anton noted, even though jump tables are currently the only type
>>>> of instruction array, users may still create insn_arrays that are not
>>>> used as jump tables. In such cases, there is no need to emit BTIs.
>>>
>>> yes, but it's not worth it to make this micro optimization in JIT.
>>> If it's in insn_array just emit BTI unconditionally.
>>> No need to do this filtering.
>>>
>>
>> Hmm, that is what the v1 version does. Please take a look. If it’s okay,
>> I’ll resend a rebased version.
>>
>> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
>
> I don't think you need bitmap and bpf_prog_collect_indirect_targets().
> Just look up each insn in the insn_array one at a time.
> It's slower, but array is sorted, so binary search should work.
No, an insn_array is not always sorted, as its ordering depends on how
it is initialized.
For example, with the following change to the selftest:
--- a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
@@ -75,7 +75,7 @@ static void check_one_to_one_mapping(void)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- __u32 map_in[] = {0, 1, 2, 3, 4, 5};
+ __u32 map_in[] = {0, 3, 1, 2, 4, 5};
__u32 map_out[] = {0, 1, 2, 3, 4, 5};
__check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
the selftest will create an unsorted map, as shown below:
# bpftool m d i 74
key: 00 00 00 00 value: 00 00 00 00 00 00 00 00 24 00 00 00 00 00 00 00
key: 01 00 00 00 value: 03 00 00 00 03 00 00 00 30 00 00 00 00 00 00 00
key: 02 00 00 00 value: 01 00 00 00 01 00 00 00 28 00 00 00 00 00 00 00
key: 03 00 00 00 value: 02 00 00 00 02 00 00 00 2c 00 00 00 00 00 00 00
key: 04 00 00 00 value: 04 00 00 00 04 00 00 00 34 00 00 00 00 00 00 00
key: 05 00 00 00 value: 05 00 00 00 05 00 00 00 38 00 00 00 00 00 00 00
Found 6 elements
On Wed, Dec 31, 2025 at 7:47 AM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>
> On 12/31/2025 10:16 AM, Alexei Starovoitov wrote:
> > On Tue, Dec 30, 2025 at 6:05 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>
> >> On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
> >>> On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>>>
> >>>> From: Xu Kuohai <xukuohai@huawei.com>
> >>>>
> >>>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >>>>
> >>>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> >>>> ...
> >>>> Call trace:
> >>>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> >>>> bpf_prog_run_pin_on_cpu+0x140/0x464
> >>>> bpf_prog_test_run_syscall+0x274/0x3ac
> >>>> bpf_prog_test_run+0x224/0x2b0
> >>>> __sys_bpf+0x4cc/0x5c8
> >>>> __arm64_sys_bpf+0x7c/0x94
> >>>> invoke_syscall+0x78/0x20c
> >>>> el0_svc_common+0x11c/0x1c0
> >>>> do_el0_svc+0x48/0x58
> >>>> el0_svc+0x54/0x19c
> >>>> el0t_64_sync_handler+0x84/0x12c
> >>>> el0t_64_sync+0x198/0x19c
> >>>>
> >>>> This happens because no BTI instruction is generated by the JIT for
> >>>> indirect jump targets.
> >>>>
> >>>> Fix it by emitting BTI instruction for every possible indirect jump
> >>>> targets when BTI is enabled. The targets are identified by traversing
> >>>> all instruction arrays of jump table type used by the BPF program,
> >>>> since indirect jump targets can only be read from instruction arrays
> >>>> of jump table type.
> >>>
> >>> earlier you said:
> >>>
> >>>> As Anton noted, even though jump tables are currently the only type
> >>>> of instruction array, users may still create insn_arrays that are not
> >>>> used as jump tables. In such cases, there is no need to emit BTIs.
> >>>
> >>> yes, but it's not worth it to make this micro optimization in JIT.
> >>> If it's in insn_array just emit BTI unconditionally.
> >>> No need to do this filtering.
> >>>
> >>
> >> Hmm, that is what the v1 version does. Please take a look. If it’s okay,
> >> I’ll resend a rebased version.
> >>
> >> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> >
> > I don't think you need bitmap and bpf_prog_collect_indirect_targets().
> > Just look up each insn in the insn_array one at a time.
> > It's slower, but array is sorted, so binary search should work.
>
> No, an insn_array is not always sorted, as its ordering depends on how
> it is initialized.
>
> For example, with the following change to the selftest:
>
> --- a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
> +++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
> @@ -75,7 +75,7 @@ static void check_one_to_one_mapping(void)
> BPF_MOV64_IMM(BPF_REG_0, 0),
> BPF_EXIT_INSN(),
> };
> - __u32 map_in[] = {0, 1, 2, 3, 4, 5};
> + __u32 map_in[] = {0, 3, 1, 2, 4, 5};
> __u32 map_out[] = {0, 1, 2, 3, 4, 5};
>
> __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
>
> the selftest will create an unsorted map, as shown below:
>
> # bpftool m d i 74
> key: 00 00 00 00 value: 00 00 00 00 00 00 00 00 24 00 00 00 00 00 00 00
> key: 01 00 00 00 value: 03 00 00 00 03 00 00 00 30 00 00 00 00 00 00 00
> key: 02 00 00 00 value: 01 00 00 00 01 00 00 00 28 00 00 00 00 00 00 00
> key: 03 00 00 00 value: 02 00 00 00 02 00 00 00 2c 00 00 00 00 00 00 00
> key: 04 00 00 00 value: 04 00 00 00 04 00 00 00 34 00 00 00 00 00 00 00
> key: 05 00 00 00 value: 05 00 00 00 05 00 00 00 38 00 00 00 00 00 00 00
> Found 6 elements
Yes, it is not always sorted (jump tables aren't guaranteed to be
sorted or have unique values).
To get rid of bpf_prog_collect_indirect_targets() in the internal API,
it is possible to just implement this inside the arm JIT. If it is later
needed in more cases, it can be generalized.
Also, how bad would it be to generate BTI instructions not only for jump
targets (say, for all instructions in the program)? If this is ok-ish
(this is a really rare condition now), then `bool is_jump_table` might
be dropped for now. (I will add similar code when adding static keys and
indirect calls so that they aren't counted for BTI.)
On 12/31/2025 3:06 PM, Anton Protopopov wrote:
> On Wed, Dec 31, 2025 at 7:47 AM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>
>> On 12/31/2025 10:16 AM, Alexei Starovoitov wrote:
>>> On Tue, Dec 30, 2025 at 6:05 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>>>
>>>> On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
>>>>> On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>>>>>>
>>>>>> From: Xu Kuohai <xukuohai@huawei.com>
>>>>>>
>>>>>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
>>>>>>
>>>>>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
>>>>>> ...
>>>>>> Call trace:
>>>>>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
>>>>>> bpf_prog_run_pin_on_cpu+0x140/0x464
>>>>>> bpf_prog_test_run_syscall+0x274/0x3ac
>>>>>> bpf_prog_test_run+0x224/0x2b0
>>>>>> __sys_bpf+0x4cc/0x5c8
>>>>>> __arm64_sys_bpf+0x7c/0x94
>>>>>> invoke_syscall+0x78/0x20c
>>>>>> el0_svc_common+0x11c/0x1c0
>>>>>> do_el0_svc+0x48/0x58
>>>>>> el0_svc+0x54/0x19c
>>>>>> el0t_64_sync_handler+0x84/0x12c
>>>>>> el0t_64_sync+0x198/0x19c
>>>>>>
>>>>>> This happens because no BTI instruction is generated by the JIT for
>>>>>> indirect jump targets.
>>>>>>
>>>>>> Fix it by emitting BTI instruction for every possible indirect jump
>>>>>> targets when BTI is enabled. The targets are identified by traversing
>>>>>> all instruction arrays of jump table type used by the BPF program,
>>>>>> since indirect jump targets can only be read from instruction arrays
>>>>>> of jump table type.
>>>>>
>>>>> earlier you said:
>>>>>
>>>>>> As Anton noted, even though jump tables are currently the only type
>>>>>> of instruction array, users may still create insn_arrays that are not
>>>>>> used as jump tables. In such cases, there is no need to emit BTIs.
>>>>>
>>>>> yes, but it's not worth it to make this micro optimization in JIT.
>>>>> If it's in insn_array just emit BTI unconditionally.
>>>>> No need to do this filtering.
>>>>>
>>>>
>>>> Hmm, that is what the v1 version does. Please take a look. If it’s okay,
>>>> I’ll resend a rebased version.
>>>>
>>>> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
>>>
>>> I don't think you need bitmap and bpf_prog_collect_indirect_targets().
>>> Just look up each insn in the insn_array one at a time.
>>> It's slower, but array is sorted, so binary search should work.
>>
>> No, an insn_array is not always sorted, as its ordering depends on how
>> it is initialized.
>>
>> For example, with the following change to the selftest:
>>
>> --- a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
>> +++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
>> @@ -75,7 +75,7 @@ static void check_one_to_one_mapping(void)
>> BPF_MOV64_IMM(BPF_REG_0, 0),
>> BPF_EXIT_INSN(),
>> };
>> - __u32 map_in[] = {0, 1, 2, 3, 4, 5};
>> + __u32 map_in[] = {0, 3, 1, 2, 4, 5};
>> __u32 map_out[] = {0, 1, 2, 3, 4, 5};
>>
>> __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
>>
>> the selftest will create an unsorted map, as shown below:
>>
>> # bpftool m d i 74
>> key: 00 00 00 00 value: 00 00 00 00 00 00 00 00 24 00 00 00 00 00 00 00
>> key: 01 00 00 00 value: 03 00 00 00 03 00 00 00 30 00 00 00 00 00 00 00
>> key: 02 00 00 00 value: 01 00 00 00 01 00 00 00 28 00 00 00 00 00 00 00
>> key: 03 00 00 00 value: 02 00 00 00 02 00 00 00 2c 00 00 00 00 00 00 00
>> key: 04 00 00 00 value: 04 00 00 00 04 00 00 00 34 00 00 00 00 00 00 00
>> key: 05 00 00 00 value: 05 00 00 00 05 00 00 00 38 00 00 00 00 00 00 00
>> Found 6 elements
>
> Yes, it is not always sorted (jump tables aren't guaranteed to be
> sorted or have unique values).
>
> To get rid of bpf_prog_collect_indirect_targets() in internal API,
> this is possible to just implement this inside arm JIT. If later it is
> needed in more cases, it can be generalized.
>
> Also, how bad is this to generate BTI instructions not only for jump
> targets (say, for all instructions in the program)? If this is ok-ish
> (this is a really rare condition now), then `bool is_jump_table` might
> be dropped for now. (I will add similar code when add static keys and
> indirect calls such that they aren't counted for BTI.)
>
IIUC, in practice insn_array usually contains only a few elements, so using
a simple linear search should be sufficient.
In corner cases, such as when all instructions are included in an insn_array
but only a few are actually used as indirect jump targets, even is_jump_table
would not prevent BTI from being jited for non-jump-target instructions. To
completely avoid this, more precise information about which instructions are
actually used as indirect jump targets would be required.
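For illustration only, the kind of per-instruction linear lookup being
discussed could look roughly like this (a sketch reusing the struct
bpf_insn_array layout from the patch above; the helper name is made up
and this is not part of the patch):

static bool insn_is_jump_target(const struct bpf_insn_array *insn_array,
                                u32 xlated_off)
{
        u32 i;

        /* insn_array is not necessarily sorted, so scan all entries */
        for (i = 0; i < insn_array->map.max_entries; i++) {
                if (insn_array->values[i].xlated_off == xlated_off)
                        return true;
        }
        return false;
}

Compared to the bitmap in this patch, this trades the one-time collection
pass for an O(n) scan per JITed instruction, which should be fine while
max_entries stays small.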
On Wed, Dec 31, 2025 at 1:22 AM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
>
> On 12/31/2025 3:06 PM, Anton Protopopov wrote:
> > On Wed, Dec 31, 2025 at 7:47 AM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>
> >> On 12/31/2025 10:16 AM, Alexei Starovoitov wrote:
> >>> On Tue, Dec 30, 2025 at 6:05 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>>>
> >>>> On 12/31/2025 2:20 AM, Alexei Starovoitov wrote:
> >>>>> On Fri, Dec 26, 2025 at 11:49 PM Xu Kuohai <xukuohai@huaweicloud.com> wrote:
> >>>>>>
> >>>>>> From: Xu Kuohai <xukuohai@huawei.com>
> >>>>>>
> >>>>>> When BTI is enabled, the indirect jump selftest triggers BTI exception:
> >>>>>>
> >>>>>> Internal error: Oops - BTI: 0000000036000003 [#1] SMP
> >>>>>> ...
> >>>>>> Call trace:
> >>>>>> bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
> >>>>>> bpf_prog_run_pin_on_cpu+0x140/0x464
> >>>>>> bpf_prog_test_run_syscall+0x274/0x3ac
> >>>>>> bpf_prog_test_run+0x224/0x2b0
> >>>>>> __sys_bpf+0x4cc/0x5c8
> >>>>>> __arm64_sys_bpf+0x7c/0x94
> >>>>>> invoke_syscall+0x78/0x20c
> >>>>>> el0_svc_common+0x11c/0x1c0
> >>>>>> do_el0_svc+0x48/0x58
> >>>>>> el0_svc+0x54/0x19c
> >>>>>> el0t_64_sync_handler+0x84/0x12c
> >>>>>> el0t_64_sync+0x198/0x19c
> >>>>>>
> >>>>>> This happens because no BTI instruction is generated by the JIT for
> >>>>>> indirect jump targets.
> >>>>>>
> >>>>>> Fix it by emitting BTI instruction for every possible indirect jump
> >>>>>> targets when BTI is enabled. The targets are identified by traversing
> >>>>>> all instruction arrays of jump table type used by the BPF program,
> >>>>>> since indirect jump targets can only be read from instruction arrays
> >>>>>> of jump table type.
> >>>>>
> >>>>> earlier you said:
> >>>>>
> >>>>>> As Anton noted, even though jump tables are currently the only type
> >>>>>> of instruction array, users may still create insn_arrays that are not
> >>>>>> used as jump tables. In such cases, there is no need to emit BTIs.
> >>>>>
> >>>>> yes, but it's not worth it to make this micro optimization in JIT.
> >>>>> If it's in insn_array just emit BTI unconditionally.
> >>>>> No need to do this filtering.
> >>>>>
> >>>>
> >>>> Hmm, that is what the v1 version does. Please take a look. If it’s okay,
> >>>> I’ll resend a rebased version.
> >>>>
> >>>> v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
> >>>
> >>> I don't think you need bitmap and bpf_prog_collect_indirect_targets().
> >>> Just look up each insn in the insn_array one at a time.
> >>> It's slower, but array is sorted, so binary search should work.
> >>
> >> No, an insn_array is not always sorted, as its ordering depends on how
> >> it is initialized.
> >>
> >> For example, with the following change to the selftest:
> >>
> >> --- a/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
> >> +++ b/tools/testing/selftests/bpf/prog_tests/bpf_insn_array.c
> >> @@ -75,7 +75,7 @@ static void check_one_to_one_mapping(void)
> >> BPF_MOV64_IMM(BPF_REG_0, 0),
> >> BPF_EXIT_INSN(),
> >> };
> >> - __u32 map_in[] = {0, 1, 2, 3, 4, 5};
> >> + __u32 map_in[] = {0, 3, 1, 2, 4, 5};
> >> __u32 map_out[] = {0, 1, 2, 3, 4, 5};
> >>
> >> __check_success(insns, ARRAY_SIZE(insns), map_in, map_out);
> >>
> >> the selftest will create an unsorted map, as shown below:
> >>
> >> # bpftool m d i 74
> >> key: 00 00 00 00 value: 00 00 00 00 00 00 00 00 24 00 00 00 00 00 00 00
> >> key: 01 00 00 00 value: 03 00 00 00 03 00 00 00 30 00 00 00 00 00 00 00
> >> key: 02 00 00 00 value: 01 00 00 00 01 00 00 00 28 00 00 00 00 00 00 00
> >> key: 03 00 00 00 value: 02 00 00 00 02 00 00 00 2c 00 00 00 00 00 00 00
> >> key: 04 00 00 00 value: 04 00 00 00 04 00 00 00 34 00 00 00 00 00 00 00
> >> key: 05 00 00 00 value: 05 00 00 00 05 00 00 00 38 00 00 00 00 00 00 00
> >> Found 6 elements
> >
> > Yes, it is not always sorted (jump tables aren't guaranteed to be
> > sorted or have unique values).
> >
> > To get rid of bpf_prog_collect_indirect_targets() in internal API,
> > this is possible to just implement this inside arm JIT. If later it is
> > needed in more cases, it can be generalized.
> >
> > Also, how bad is this to generate BTI instructions not only for jump
> > targets (say, for all instructions in the program)? If this is ok-ish
> > (this is a really rare condition now), then `bool is_jump_table` might
> > be dropped for now. (I will add similar code when add static keys and
> > indirect calls such that they aren't counted for BTI.)
> >
>
> IIUC, in practice insn_array usually contains only a few elements, so using
> a simple linear search should be sufficient.
I don't see why it cannot be sorted after populating. Or, if the
upcoming bpf-static-branch logic needs a certain order and cannot
tolerate sorting by the kernel, we can enforce sorted order as part
of the contract. At insn_array freeze time the kernel can check
whether it's sorted and, if not, reject the freeze.
I suspect sooner or later we will need an efficient search in the insn array.
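For illustration, a freeze-time sortedness check and the binary search it
would enable might look roughly like this (hypothetical helpers reusing
the struct bpf_insn_array layout from the patch above; none of this is
existing code):

static bool insn_array_sorted(const struct bpf_insn_array *insn_array)
{
        u32 i;

        /* could run at freeze time; an unsorted array would fail the freeze */
        for (i = 1; i < insn_array->map.max_entries; i++) {
                if (insn_array->values[i].xlated_off <
                    insn_array->values[i - 1].xlated_off)
                        return false;
        }
        return true;
}

static bool insn_array_find_target(const struct bpf_insn_array *insn_array,
                                   u32 xlated_off)
{
        u32 lo = 0, hi = insn_array->map.max_entries;

        /* lower-bound binary search over the (sorted) xlated offsets */
        while (lo < hi) {
                u32 mid = lo + (hi - lo) / 2;

                if (insn_array->values[mid].xlated_off < xlated_off)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        return lo < insn_array->map.max_entries &&
               insn_array->values[lo].xlated_off == xlated_off;
}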