From: Kaitao Cheng <chengkaitao@kylinos.cn>
Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
checks with btf_id_in_kfunc_table() and static kfunc tables for easier
maintenance.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/verifier.c | 93 +++++++++++++++++++++++++++++--------------
1 file changed, 64 insertions(+), 29 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d133d18aa0cc..25961cf83a67 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12600,6 +12600,60 @@ BTF_ID(func, bpf_session_is_return)
BTF_ID(func, bpf_stream_vprintk)
BTF_ID(func, bpf_stream_print_stack)
+static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
+ KF_bpf_list_push_front_impl,
+ KF_bpf_list_push_back_impl,
+ KF_bpf_list_pop_front,
+ KF_bpf_list_pop_back,
+ KF_bpf_list_del,
+ KF_bpf_list_front,
+ KF_bpf_list_back,
+ KF_bpf_list_add_impl,
+ KF_bpf_list_node_is_edge,
+ KF_bpf_list_empty,
+};
+
+/* Kfuncs that take a list node argument (bpf_list_node *). */
+static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
+ KF_bpf_list_push_front_impl,
+ KF_bpf_list_push_back_impl,
+ KF_bpf_list_del,
+ KF_bpf_list_add_impl,
+ KF_bpf_list_node_is_edge,
+};
+
+/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
+static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
+ KF_bpf_rbtree_remove,
+ KF_bpf_rbtree_add_impl,
+ KF_bpf_rbtree_left,
+ KF_bpf_rbtree_right,
+};
+
+static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
+ KF_bpf_rbtree_add_impl,
+ KF_bpf_rbtree_remove,
+ KF_bpf_rbtree_first,
+ KF_bpf_rbtree_root,
+ KF_bpf_rbtree_left,
+ KF_bpf_rbtree_right,
+};
+
+static const enum special_kfunc_type bpf_res_spin_lock_kfuncs[] = {
+ KF_bpf_res_spin_lock,
+ KF_bpf_res_spin_unlock,
+ KF_bpf_res_spin_lock_irqsave,
+ KF_bpf_res_spin_unlock_irqrestore,
+};
+
+static bool btf_id_in_kfunc_table(u32 btf_id, const enum special_kfunc_type *kfuncs, int n)
+{
+ for (int i = 0; i < n; i++)
+ if (btf_id == special_kfunc_list[kfuncs[i]])
+ return true;
+ return false;
+}
+
static bool is_task_work_add_kfunc(u32 func_id)
{
return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
@@ -13000,26 +13054,14 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
static bool is_bpf_list_api_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
- btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
- btf_id == special_kfunc_list[KF_bpf_list_del] ||
- btf_id == special_kfunc_list[KF_bpf_list_front] ||
- btf_id == special_kfunc_list[KF_bpf_list_back] ||
- btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
- btf_id == special_kfunc_list[KF_bpf_list_node_is_edge] ||
- btf_id == special_kfunc_list[KF_bpf_list_empty];
+ return btf_id_in_kfunc_table(btf_id, bpf_list_api_kfuncs,
+ ARRAY_SIZE(bpf_list_api_kfuncs));
}
static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- btf_id == special_kfunc_list[KF_bpf_rbtree_right];
+ return btf_id_in_kfunc_table(btf_id, bpf_rbtree_api_kfuncs,
+ ARRAY_SIZE(bpf_rbtree_api_kfuncs));
}
static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
@@ -13037,10 +13079,8 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
{
- return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
- btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore];
+ return btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_kfuncs,
+ ARRAY_SIZE(bpf_res_spin_lock_kfuncs));
}
static bool is_bpf_arena_kfunc(u32 btf_id)
@@ -13129,17 +13169,12 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
switch (node_field_type) {
case BPF_LIST_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_list_node_is_edge]);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_node_api_kfuncs,
+ ARRAY_SIZE(bpf_list_node_api_kfuncs));
break;
case BPF_RB_NODE:
- ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
- kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
+ ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_node_api_kfuncs,
+ ARRAY_SIZE(bpf_rbtree_node_api_kfuncs));
break;
default:
verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
--
2.50.1 (Apple Git-155)
NIT on the subject prefix: instead of bpf/verifier, it should be one of
bpf, libbpf, selftests/bpf, bpftool, or "bpf, x86/arm64".
So, the subject can be "bpf: refactor kfunc checks using table-driven
approach in verifier".
Thanks,
Leon
On 2026/3/4 22:34, Chengkaitao wrote:
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>
> Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
> checks with btf_id_in_kfunc_table() and static kfunc tables for easier
> maintenance.
>
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
> kernel/bpf/verifier.c | 93 +++++++++++++++++++++++++++++--------------
> 1 file changed, 64 insertions(+), 29 deletions(-)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index d133d18aa0cc..25961cf83a67 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -12600,6 +12600,60 @@ BTF_ID(func, bpf_session_is_return)
> BTF_ID(func, bpf_stream_vprintk)
> BTF_ID(func, bpf_stream_print_stack)
>
> +static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
> + KF_bpf_list_push_front_impl,
> + KF_bpf_list_push_back_impl,
> + KF_bpf_list_pop_front,
> + KF_bpf_list_pop_back,
> + KF_bpf_list_del,
> + KF_bpf_list_front,
> + KF_bpf_list_back,
> + KF_bpf_list_add_impl,
> + KF_bpf_list_node_is_edge,
> + KF_bpf_list_empty,
> +};
> +
> +/* Kfuncs that take a list node argument (bpf_list_node *). */
> +static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
> + KF_bpf_list_push_front_impl,
> + KF_bpf_list_push_back_impl,
> + KF_bpf_list_del,
> + KF_bpf_list_add_impl,
> + KF_bpf_list_node_is_edge,
> +};
> +
> +/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
> +static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
> + KF_bpf_rbtree_remove,
> + KF_bpf_rbtree_add_impl,
> + KF_bpf_rbtree_left,
> + KF_bpf_rbtree_right,
> +};
> +
> +static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
> + KF_bpf_rbtree_add_impl,
> + KF_bpf_rbtree_remove,
> + KF_bpf_rbtree_first,
> + KF_bpf_rbtree_root,
> + KF_bpf_rbtree_left,
> + KF_bpf_rbtree_right,
> +};
> +
> +static const enum special_kfunc_type bpf_res_spin_lock_kfuncs[] = {
> + KF_bpf_res_spin_lock,
> + KF_bpf_res_spin_unlock,
> + KF_bpf_res_spin_lock_irqsave,
> + KF_bpf_res_spin_unlock_irqrestore,
> +};
> +
> +static bool btf_id_in_kfunc_table(u32 btf_id, const enum special_kfunc_type *kfuncs, int n)
> +{
> + for (int i = 0; i < n; i++)
> + if (btf_id == special_kfunc_list[kfuncs[i]])
> + return true;
> + return false;
> +}
> +
> static bool is_task_work_add_kfunc(u32 func_id)
> {
> return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
> @@ -13000,26 +13054,14 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
>
> static bool is_bpf_list_api_kfunc(u32 btf_id)
> {
> - return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> - btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> - btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
> - btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
> - btf_id == special_kfunc_list[KF_bpf_list_del] ||
> - btf_id == special_kfunc_list[KF_bpf_list_front] ||
> - btf_id == special_kfunc_list[KF_bpf_list_back] ||
> - btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> - btf_id == special_kfunc_list[KF_bpf_list_node_is_edge] ||
> - btf_id == special_kfunc_list[KF_bpf_list_empty];
> + return btf_id_in_kfunc_table(btf_id, bpf_list_api_kfuncs,
> + ARRAY_SIZE(bpf_list_api_kfuncs));
> }
>
> static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
> {
> - return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
> - btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
> - btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
> - btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
> - btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
> - btf_id == special_kfunc_list[KF_bpf_rbtree_right];
> + return btf_id_in_kfunc_table(btf_id, bpf_rbtree_api_kfuncs,
> + ARRAY_SIZE(bpf_rbtree_api_kfuncs));
> }
>
> static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
> @@ -13037,10 +13079,8 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
>
> static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
> {
> - return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
> - btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] ||
> - btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
> - btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore];
> + return btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_kfuncs,
> + ARRAY_SIZE(bpf_res_spin_lock_kfuncs));
> }
>
> static bool is_bpf_arena_kfunc(u32 btf_id)
> @@ -13129,17 +13169,12 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
>
> switch (node_field_type) {
> case BPF_LIST_NODE:
> - ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_list_node_is_edge]);
> + ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_node_api_kfuncs,
> + ARRAY_SIZE(bpf_list_node_api_kfuncs));
> break;
> case BPF_RB_NODE:
> - ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
> - kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
> + ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_node_api_kfuncs,
> + ARRAY_SIZE(bpf_rbtree_node_api_kfuncs));
> break;
> default:
> verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
[ ... ]
> static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
> {
>+ return btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_kfuncs,
>+ ARRAY_SIZE(bpf_res_spin_lock_kfuncs));
> }
The continuation line here has one fewer space than the other three call
sites (is_bpf_list_api_kfunc, is_bpf_rbtree_api_kfunc, and
check_kfunc_is_graph_node_api), so ARRAY_SIZE doesn't align to the
opening parenthesis of btf_id_in_kfunc_table(). Should this be aligned
to match the others?
---
AI reviewed your patch. Please either fix the issue or reply by email explaining why it is not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22674350640
© 2016 - 2026 Red Hat, Inc.