From: Xu Kuohai <xukuohai@huawei.com>
When BTI is enabled, the indirect jump selftest triggers a BTI exception:
Internal error: Oops - BTI: 0000000036000003 [#1] SMP
...
Call trace:
bpf_prog_2e5f1c71c13ac3e0_big_jump_table+0x54/0xf8 (P)
bpf_prog_run_pin_on_cpu+0x140/0x464
bpf_prog_test_run_syscall+0x274/0x3ac
bpf_prog_test_run+0x224/0x2b0
__sys_bpf+0x4cc/0x5c8
__arm64_sys_bpf+0x7c/0x94
invoke_syscall+0x78/0x20c
el0_svc_common+0x11c/0x1c0
do_el0_svc+0x48/0x58
el0_svc+0x54/0x19c
el0t_64_sync_handler+0x84/0x12c
el0t_64_sync+0x198/0x19c
This happens because no BTI instruction is generated by the JIT for
indirect jump targets.
Fix it by emitting a BTI instruction for every possible indirect jump
target when BTI is enabled. The targets are identified by traversing
all instruction arrays of jump table type used by the BPF program,
since indirect jump targets can only be read from such arrays.
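For illustration, the trigger is a program of this shape: a dense switch
in BPF C that LLVM lowers to an indirect jump (gotox) through a jump
table. A minimal sketch (struct layout, case count and return values are
hypothetical, not the actual selftest source):

	#include <bpf/bpf_helpers.h>

	char _license[] SEC("license") = "GPL";

	/* Hypothetical context layout for this sketch. */
	struct args {
		int n;
	};

	SEC("syscall")
	int big_jump_table(struct args *ctx)
	{
		/* Every case label below is a potential indirect jump
		 * target; with BTI enabled, each one needs a BTI J
		 * landing pad in the JITed image.
		 */
		switch (ctx->n) {
		case 0:  return 100;
		case 1:  return 101;
		case 2:  return 102;
		/* ... enough consecutive cases for a jump table ... */
		case 31: return 131;
		default: return 0;
		}
	}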
Fixes: f4a66cf1cb14 ("bpf: arm64: Add support for indirect jumps")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
---
v2:
- Exclude instruction arrays not used for indirect jumps (Anton Protopopov)
v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com/
---
arch/arm64/net/bpf_jit_comp.c | 20 +++++++++++
include/linux/bpf.h | 19 +++++++++++
kernel/bpf/bpf_insn_array.c | 63 +++++++++++++++++++++++++++++++++++
kernel/bpf/verifier.c | 6 ++++
4 files changed, 108 insertions(+)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 0c4d44bcfbf4..f08f0f9fa04e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -78,6 +78,7 @@ static const int bpf2a64[] = {
struct jit_ctx {
const struct bpf_prog *prog;
+ unsigned long *indirect_targets;
int idx;
int epilogue_offset;
int *offset;
@@ -1199,6 +1200,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
}
+static bool is_indirect_target(int insn_off, unsigned long *targets_bitmap)
+{
+ return targets_bitmap && test_bit(insn_off, targets_bitmap);
+}
+
/* JITs an eBPF instruction.
* Returns:
* 0 - successfully JITed an 8-byte eBPF instruction.
@@ -1231,6 +1237,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
int ret;
bool sign_extend;
+ if (is_indirect_target(i, ctx->indirect_targets))
+ emit_bti(A64_BTI_J, ctx);
+
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@@ -2085,6 +2094,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
+ if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && bpf_prog_has_jump_table(prog)) {
+ ctx.indirect_targets = kvcalloc(BITS_TO_LONGS(prog->len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (ctx.indirect_targets == NULL) {
+ prog = orig_prog;
+ goto out_off;
+ }
+ bpf_prog_collect_indirect_targets(prog, ctx.indirect_targets);
+ }
+
ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
@@ -2248,6 +2267,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
prog->aux->priv_stack_ptr = NULL;
}
kvfree(ctx.offset);
+ kvfree(ctx.indirect_targets);
out_priv_stack:
kfree(jit_data);
prog->aux->jit_data = NULL;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index da6a00dd313f..a3a89d4b4dae 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3875,13 +3875,32 @@ void bpf_insn_array_release(struct bpf_map *map);
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
+enum bpf_insn_array_type {
+ BPF_INSN_ARRAY_VOID,
+ BPF_INSN_ARRAY_JUMP_TABLE,
+};
+
#ifdef CONFIG_BPF_SYSCALL
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap);
+void bpf_prog_set_insn_array_type(struct bpf_map *map, int type);
+bool bpf_prog_has_jump_table(const struct bpf_prog *prog);
#else
static inline void
bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
}
+static inline void
+bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+}
+static inline void bpf_prog_set_insn_array_type(struct bpf_map *map, int type)
+{
+}
+static inline bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif
static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
diff --git a/kernel/bpf/bpf_insn_array.c b/kernel/bpf/bpf_insn_array.c
index c96630cb75bf..fbffc865feab 100644
--- a/kernel/bpf/bpf_insn_array.c
+++ b/kernel/bpf/bpf_insn_array.c
@@ -5,6 +5,7 @@
struct bpf_insn_array {
struct bpf_map map;
+ int type;
atomic_t used;
long *ips;
DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
@@ -159,6 +160,17 @@ static bool is_insn_array(const struct bpf_map *map)
return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
}
+static bool is_jump_table(const struct bpf_map *map)
+{
+ struct bpf_insn_array *insn_array;
+
+ if (!is_insn_array(map))
+ return false;
+
+ insn_array = cast_insn_array(map);
+ return insn_array->type == BPF_INSN_ARRAY_JUMP_TABLE;
+}
+
static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
const struct bpf_prog *prog)
{
@@ -302,3 +314,54 @@ void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
}
}
}
+
+bool bpf_prog_has_jump_table(const struct bpf_prog *prog)
+{
+ int i;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++) {
+ if (is_jump_table(prog->aux->used_maps[i]))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * This function collects possible indirect jump targets in a BPF program. Since indirect
+ * jump targets can only be read from instruction arrays used as jump tables, it traverses
+ * all jump tables used by @prog. For each instruction found in the jump tables, it sets
+ * the corresponding bit in @bitmap.
+ */
+void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
+{
+ struct bpf_insn_array *insn_array;
+ struct bpf_map *map;
+ u32 xlated_off;
+ int i, j;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++) {
+ map = prog->aux->used_maps[i];
+ if (!is_jump_table(map))
+ continue;
+
+ insn_array = cast_insn_array(map);
+ for (j = 0; j < map->max_entries; j++) {
+ xlated_off = insn_array->values[j].xlated_off;
+ if (xlated_off == INSN_DELETED)
+ continue;
+ if (xlated_off < prog->aux->subprog_start)
+ continue;
+ xlated_off -= prog->aux->subprog_start;
+ if (xlated_off >= prog->len)
+ continue;
+ __set_bit(xlated_off, bitmap);
+ }
+ }
+}
+
+void bpf_prog_set_insn_array_type(struct bpf_map *map, int type)
+{
+ struct bpf_insn_array *insn_array = cast_insn_array(map);
+
+ insn_array->type = type;
+}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d6b8a77fbe3b..ee6f4ddfbb79 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20288,6 +20288,12 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
return -EINVAL;
}
+ /*
+ * Explicitly mark this map as a jump table so that it can be
+ * distinguished later from other instruction arrays.
+ */
+ bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
+
for (i = 0; i < n - 1; i++) {
other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
env->insn_idx, env->cur_state->speculative);
--
2.47.3
On 12/23/25 12:54 AM, Xu Kuohai wrote:
> From: Xu Kuohai <xukuohai@huawei.com>
>
> [...]
>
> +enum bpf_insn_array_type {
> + BPF_INSN_ARRAY_VOID,
What is the purpose for BPF_INSN_ARRAY_VOID? Do we really need it?
> + BPF_INSN_ARRAY_JUMP_TABLE,
> +};
>
> [...]
>
> + insn_array = cast_insn_array(map);
> + for (j = 0; j < map->max_entries; j++) {
> + xlated_off = insn_array->values[j].xlated_off;
> + if (xlated_off == INSN_DELETED)
> + continue;
> + if (xlated_off < prog->aux->subprog_start)
> + continue;
> + xlated_off -= prog->aux->subprog_start;
> + if (xlated_off >= prog->len)
> + continue;
The above code is duplicated in bpf_prog_update_insn_ptrs().
Maybe we can have a helper for the above?
> + __set_bit(xlated_off, bitmap);
> + }
> + }
> +}
>
> [...]
>
> + /*
> + * Explicitly mark this map as a jump table so that it can be
> + * distinguished later from other instruction arrays.
> + */
> + bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
I think we do not need this for now. If a new indirect_jump type is introduced,
verifier/jit can be adjusted that time if necessary.
On 12/24/2025 2:32 AM, Yonghong Song wrote:
[...]
>> +void bpf_prog_collect_indirect_targets(const struct bpf_prog *prog, unsigned long *bitmap)
>> +{
>>
>> [...]
>>
>> + xlated_off -= prog->aux->subprog_start;
>> + if (xlated_off >= prog->len)
>> + continue;
>
> The above code is duplicated in bpf_prog_update_insn_ptrs().
> Maybe we can have a helper for the above?
>
I tried using function callbacks to factor out the duplicated code,
but the result felt a bit over-engineered. For these two functions,
simple duplication seems clearer and simpler.
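(For reference, if this were factored out later, one possible shape of the
helper, derived from the two loops themselves rather than from any existing
kernel code:)

	/* Sketch only: map one insn array entry's xlated offset to an
	 * instruction index inside @prog, or return -1 if the entry was
	 * deleted or falls outside this (sub)program. Both loops would
	 * then reduce to a call plus an "if (idx >= 0)" body.
	 */
	static int insn_array_entry_to_prog_idx(const struct bpf_insn_array *insn_array,
						u32 entry, const struct bpf_prog *prog)
	{
		u32 xlated_off = insn_array->values[entry].xlated_off;

		if (xlated_off == INSN_DELETED)
			return -1;
		if (xlated_off < prog->aux->subprog_start)
			return -1;
		xlated_off -= prog->aux->subprog_start;
		if (xlated_off >= prog->len)
			return -1;
		return xlated_off;
	}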
>> + __set_bit(xlated_off, bitmap);
>> + }
>> + }
>> +}
>>
>> [...]
>>
>> + /*
>> + * Explicitly mark this map as a jump table so that it can be
>> + * distinguished later from other instruction arrays.
>> + */
>> + bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
>
> I think we do not need this for now. If a new indirect_jump type is introduced,
> verifier/jit can be adjusted that time if necessary.
>
As Anton noted, even though jump tables are currently the only type
of instruction array, users may still create insn_arrays that are not
used as jump tables. In such cases, there is no need to emit BTIs.
On 12/25/25 3:59 AM, Xu Kuohai wrote:
> On 12/24/2025 2:32 AM, Yonghong Song wrote:
>
> [...]
>
>> The above codes are duplicated with bpf_prog_update_insn_ptrs().
>> Maybe we can have a helper for the above?
>>
>
> I tried using function callbacks to factor out the duplicated code,
> but the result felt a bit over-engineered. For these two functions,
> simple duplication seems clearer and simpler.
I am okay with this then.
>
> [...]
>
>>> + /*
>>> + * Explicitly mark this map as a jump table so that it can be
>>> + * distinguished later from other instruction arrays.
>>> + */
>>> + bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
>>
>> I think we do not need this for now. If a new indirect_jump type is
>> introduced, verifier/jit can be adjusted that time if necessary.
>>
>
> As Anton noted, even though jump tables are currently the only type
> of instruction array, users may still create insn_arrays that are not
> used as jump tables. In such cases, there is no need to emit BTIs.
Okay. Thanks for explanation.
On 25/12/23 10:32AM, Yonghong Song wrote:
>
> On 12/23/25 12:54 AM, Xu Kuohai wrote:
>
> > [...]
> >
> > +enum bpf_insn_array_type {
> > + BPF_INSN_ARRAY_VOID,
> What is the purpose for BPF_INSN_ARRAY_VOID? Do we really need it?
There seems to be no need for a name for the default case,
but BPF_INSN_ARRAY_JUMP_TABLE should be != 0, so it can be just
enum bpf_insn_array_type {
BPF_INSN_ARRAY_JUMP_TABLE = 1,
};
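(The reason "!= 0" matters: insn array maps are zero-filled on allocation,
so an unmarked array already has type == 0, and the jump-table check rejects
it without the default state needing a name. A sketch of the resulting
check, assuming that zero-fill behavior:)

	enum bpf_insn_array_type {
		BPF_INSN_ARRAY_JUMP_TABLE = 1,
	};

	static bool is_jump_table(const struct bpf_map *map)
	{
		if (!is_insn_array(map))
			return false;
		/* type stays 0 until the verifier marks the map */
		return cast_insn_array(map)->type == BPF_INSN_ARRAY_JUMP_TABLE;
	}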
> > + BPF_INSN_ARRAY_JUMP_TABLE,
> > +};
> >
> > [...]
> >
> > + /*
> > + * Explicitly mark this map as a jump table so that it can be
> > + * distinguished later from other instruction arrays.
> > + */
> > + bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
>
> I think we do not need this for now. If a new indirect_jump type is introduced,
> verifier/jit can be adjusted that time if necessary.
>
See the v1 thread. In brief, a user can already create an instruction array
which is not used as a jump table, say, pointing to every instruction (see
selftests for insn array). And if not distinguished from a jump table, this
will force arm jit into emitting BTI for every instruction listed inside it.
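(Illustration only, not kernel code: with no jump-table check, an insn array
whose entries cover every instruction, as the insn array selftests build,
would degenerate the collected bitmap to all-ones, and the JIT would then
prepend a BTI J to every single instruction:)

	/* Hypothetical v1 worst case: every instruction becomes a "target". */
	static void v1_worst_case(const struct bpf_prog *prog, unsigned long *bitmap)
	{
		u32 j;

		for (j = 0; j < prog->len; j++)
			__set_bit(j, bitmap);
	}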
On 12/25/25 3:04 AM, Anton Protopopov wrote:
> On 25/12/23 10:32AM, Yonghong Song wrote:
>
> [...]
>
>>> + /*
>>> + * Explicitly mark this map as a jump table so that it can be
>>> + * distinguished later from other instruction arrays.
>>> + */
>>> + bpf_prog_set_insn_array_type(map, BPF_INSN_ARRAY_JUMP_TABLE);
>> I think we do not need this for now. If a new indirect_jump type is introduced,
>> verifier/jit can be adjusted that time if necessary.
> See the v1 thread. In brief, a user can already create an instruction array
> which is not used as a jump table, say, pointing to every instruction (see
> selftests for insn array). And if not distinguished from a jump table, this
> will force arm jit into emitting BTI for every instruction listed inside it.
I missed this. Thanks for explanation!
On 12/25/2025 7:04 PM, Anton Protopopov wrote:
[...]
>>> +enum bpf_insn_array_type {
>>> + BPF_INSN_ARRAY_VOID,
>>
>> What is the purpose for BPF_INSN_ARRAY_VOID? Do we really need it?
>
> There seems to be no need for a name for the default case,
> but BPF_INSN_ARRAY_JUMP_TABLE should be != 0, so it can be just
>
> enum bpf_insn_array_type {
> BPF_INSN_ARRAY_JUMP_TABLE = 1,
> };
>
Having only BPF_INSN_ARRAY_JUMP_TABLE feels incomplete, since there
would be no enum value to indicate an instruction array without a
specific purpose, like the insn_arrays created in selftests [1].
[1] https://lore.kernel.org/bpf/20251105090410.1250500-5-a.s.protopopov@gmail.com/
[...]
On 25/12/25 07:46PM, Xu Kuohai wrote:
> On 12/25/2025 7:04 PM, Anton Protopopov wrote:
>
> [...]
>
> Having only BPF_INSN_ARRAY_JUMP_TABLE feels incomplete, since there
> would be no enum value to indicate an instruction array without a
> specific purpose, like the insn_arrays created in selftests [1].
Yes, but it is also never used explicitly, right?
The only usage is in "x != BPF_INSN_ARRAY_JUMP_TABLE".
> [1] https://lore.kernel.org/bpf/20251105090410.1250500-5-a.s.protopopov@gmail.com/