[PATCH bpf-next v7 5/5] bpf: refactor kfunc checks using table-driven approach in verifier

Chengkaitao posted 5 patches 1 month ago
There is a newer version of this series
[PATCH bpf-next v7 5/5] bpf: refactor kfunc checks using table-driven approach in verifier
Posted by Chengkaitao 1 month ago
From: Kaitao Cheng <chengkaitao@kylinos.cn>

Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
checks with btf_id_in_kfunc_table() and static kfunc tables for easier
maintenance.

Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
 kernel/bpf/verifier.c | 97 +++++++++++++++++++++++++++++--------------
 1 file changed, 66 insertions(+), 31 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5e32e02429c4..853716f599ce 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12602,6 +12602,62 @@ BTF_ID(func, bpf_session_is_return)
 BTF_ID(func, bpf_stream_vprintk)
 BTF_ID(func, bpf_stream_print_stack)
 
+static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
+	KF_bpf_list_push_front_impl,
+	KF_bpf_list_push_back_impl,
+	KF_bpf_list_add_impl,
+	KF_bpf_list_pop_front,
+	KF_bpf_list_pop_back,
+	KF_bpf_list_del,
+	KF_bpf_list_front,
+	KF_bpf_list_back,
+	KF_bpf_list_is_first,
+	KF_bpf_list_is_last,
+	KF_bpf_list_empty,
+};
+
+/* Kfuncs that take a list node argument (bpf_list_node *). */
+static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
+	KF_bpf_list_push_front_impl,
+	KF_bpf_list_push_back_impl,
+	KF_bpf_list_add_impl,
+	KF_bpf_list_del,
+	KF_bpf_list_is_first,
+	KF_bpf_list_is_last,
+};
+
+/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
+static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
+	KF_bpf_rbtree_remove,
+	KF_bpf_rbtree_add_impl,
+	KF_bpf_rbtree_left,
+	KF_bpf_rbtree_right,
+};
+
+static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
+	KF_bpf_rbtree_add_impl,
+	KF_bpf_rbtree_remove,
+	KF_bpf_rbtree_first,
+	KF_bpf_rbtree_root,
+	KF_bpf_rbtree_left,
+	KF_bpf_rbtree_right,
+};
+
+static const enum special_kfunc_type bpf_res_spin_lock_kfuncs[] = {
+	KF_bpf_res_spin_lock,
+	KF_bpf_res_spin_unlock,
+	KF_bpf_res_spin_lock_irqsave,
+	KF_bpf_res_spin_unlock_irqrestore,
+};
+
+static bool btf_id_in_kfunc_table(u32 btf_id, const enum special_kfunc_type *kfuncs, int n)
+{
+	for (int i = 0; i < n; i++)
+		if (btf_id == special_kfunc_list[kfuncs[i]])
+			return true;
+	return false;
+}
+
 static bool is_task_work_add_kfunc(u32 func_id)
 {
 	return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
@@ -13002,27 +13058,14 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
 
 static bool is_bpf_list_api_kfunc(u32 btf_id)
 {
-	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_front] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_back] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_is_last] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_empty];
+	return btf_id_in_kfunc_table(btf_id, bpf_list_api_kfuncs,
+				     ARRAY_SIZE(bpf_list_api_kfuncs));
 }
 
 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
 {
-	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_right];
+	return btf_id_in_kfunc_table(btf_id, bpf_rbtree_api_kfuncs,
+				     ARRAY_SIZE(bpf_rbtree_api_kfuncs));
 }
 
 static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
@@ -13040,10 +13083,8 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
 
 static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
 {
-	return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
-	       btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] ||
-	       btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
-	       btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore];
+	return btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_kfuncs,
+				     ARRAY_SIZE(bpf_res_spin_lock_kfuncs));
 }
 
 static bool is_bpf_arena_kfunc(u32 btf_id)
@@ -13132,18 +13173,12 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 
 	switch (node_field_type) {
 	case BPF_LIST_NODE:
-		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_last]);
+		ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_node_api_kfuncs,
+					    ARRAY_SIZE(bpf_list_node_api_kfuncs));
 		break;
 	case BPF_RB_NODE:
-		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
+		ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_node_api_kfuncs,
+					    ARRAY_SIZE(bpf_rbtree_node_api_kfuncs));
 		break;
 	default:
 		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
-- 
2.50.1 (Apple Git-155)
Re: [PATCH bpf-next v7 5/5] bpf: refactor kfunc checks using table-driven approach in verifier
Posted by Leon Hwang 1 month ago
On 8/3/26 21:46, Chengkaitao wrote:
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
> 
> Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
> checks with btf_id_in_kfunc_table() and static kfunc tables for easier
> maintenance.
> 

Shouldn't such refactoring be the first patch of the series? That would mean
less churn: the later patches would then only need to update the tables.

However, is_bpf_rbtree_api_kfunc(), is_bpf_res_spin_lock_kfunc(), and
BPF_RB_NODE should be excluded, because you didn't touch them in this
series.

Thanks,
Leon

> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
>  kernel/bpf/verifier.c | 97 +++++++++++++++++++++++++++++--------------
>  1 file changed, 66 insertions(+), 31 deletions(-)
> 
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 5e32e02429c4..853716f599ce 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -12602,6 +12602,62 @@ BTF_ID(func, bpf_session_is_return)
>  BTF_ID(func, bpf_stream_vprintk)
>  BTF_ID(func, bpf_stream_print_stack)
>  
> +static const enum special_kfunc_type bpf_list_api_kfuncs[] = {
> +	KF_bpf_list_push_front_impl,
> +	KF_bpf_list_push_back_impl,
> +	KF_bpf_list_add_impl,
> +	KF_bpf_list_pop_front,
> +	KF_bpf_list_pop_back,
> +	KF_bpf_list_del,
> +	KF_bpf_list_front,
> +	KF_bpf_list_back,
> +	KF_bpf_list_is_first,
> +	KF_bpf_list_is_last,
> +	KF_bpf_list_empty,
> +};
> +
> +/* Kfuncs that take a list node argument (bpf_list_node *). */
> +static const enum special_kfunc_type bpf_list_node_api_kfuncs[] = {
> +	KF_bpf_list_push_front_impl,
> +	KF_bpf_list_push_back_impl,
> +	KF_bpf_list_add_impl,
> +	KF_bpf_list_del,
> +	KF_bpf_list_is_first,
> +	KF_bpf_list_is_last,
> +};
> +
> +/* Kfuncs that take an rbtree node argument (bpf_rb_node *). */
> +static const enum special_kfunc_type bpf_rbtree_node_api_kfuncs[] = {
> +	KF_bpf_rbtree_remove,
> +	KF_bpf_rbtree_add_impl,
> +	KF_bpf_rbtree_left,
> +	KF_bpf_rbtree_right,
> +};
> +
> +static const enum special_kfunc_type bpf_rbtree_api_kfuncs[] = {
> +	KF_bpf_rbtree_add_impl,
> +	KF_bpf_rbtree_remove,
> +	KF_bpf_rbtree_first,
> +	KF_bpf_rbtree_root,
> +	KF_bpf_rbtree_left,
> +	KF_bpf_rbtree_right,
> +};
> +
> +static const enum special_kfunc_type bpf_res_spin_lock_kfuncs[] = {
> +	KF_bpf_res_spin_lock,
> +	KF_bpf_res_spin_unlock,
> +	KF_bpf_res_spin_lock_irqsave,
> +	KF_bpf_res_spin_unlock_irqrestore,
> +};
> +
> +static bool btf_id_in_kfunc_table(u32 btf_id, const enum special_kfunc_type *kfuncs, int n)
> +{
> +	for (int i = 0; i < n; i++)
> +		if (btf_id == special_kfunc_list[kfuncs[i]])
> +			return true;
> +	return false;
> +}
> +
>  static bool is_task_work_add_kfunc(u32 func_id)
>  {
>  	return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
> @@ -13002,27 +13058,14 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
>  
>  static bool is_bpf_list_api_kfunc(u32 btf_id)
>  {
> -	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_front] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_back] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_is_last] ||
> -	       btf_id == special_kfunc_list[KF_bpf_list_empty];
> +	return btf_id_in_kfunc_table(btf_id, bpf_list_api_kfuncs,
> +				     ARRAY_SIZE(bpf_list_api_kfuncs));
>  }
>  
>  static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
>  {
> -	return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
> -	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
> -	       btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
> -	       btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
> -	       btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
> -	       btf_id == special_kfunc_list[KF_bpf_rbtree_right];
> +	return btf_id_in_kfunc_table(btf_id, bpf_rbtree_api_kfuncs,
> +				     ARRAY_SIZE(bpf_rbtree_api_kfuncs));
>  }
>  
>  static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
> @@ -13040,10 +13083,8 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id)
>  
>  static bool is_bpf_res_spin_lock_kfunc(u32 btf_id)
>  {
> -	return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] ||
> -	       btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] ||
> -	       btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] ||
> -	       btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore];
> +	return btf_id_in_kfunc_table(btf_id, bpf_res_spin_lock_kfuncs,
> +				     ARRAY_SIZE(bpf_res_spin_lock_kfuncs));
>  }
>  
>  static bool is_bpf_arena_kfunc(u32 btf_id)
> @@ -13132,18 +13173,12 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
>  
>  	switch (node_field_type) {
>  	case BPF_LIST_NODE:
> -		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_last]);
> +		ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_list_node_api_kfuncs,
> +					    ARRAY_SIZE(bpf_list_node_api_kfuncs));
>  		break;
>  	case BPF_RB_NODE:
> -		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
> -		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
> +		ret = btf_id_in_kfunc_table(kfunc_btf_id, bpf_rbtree_node_api_kfuncs,
> +					    ARRAY_SIZE(bpf_rbtree_node_api_kfuncs));
>  		break;
>  	default:
>  		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
Re: [PATCH bpf-next v7 5/5] bpf: refactor kfunc checks using table-driven approach in verifier
Posted by Kumar Kartikeya Dwivedi 4 weeks, 1 day ago
On Mon, 9 Mar 2026 at 07:45, Leon Hwang <leon.hwang@linux.dev> wrote:
>
> On 8/3/26 21:46, Chengkaitao wrote:
> > From: Kaitao Cheng <chengkaitao@kylinos.cn>
> >
> > Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
> > checks with btf_id_in_kfunc_table() and static kfunc tables for easier
> > maintenance.
> >
>
> Such refactoring should be the first patch? Less churn. Then, update the
> list only.
>
> However, is_bpf_rbtree_api_kfunc(), is_bpf_res_spin_lock_kfunc(), and
> BPF_RB_NODE should be excluded, because you didn't touch them in this
> series.

I think moving the cleanup earlier makes some sense, but why exclude
rbtree and res_spin_lock?
It looks better to me to convert them all.

>
> Thanks,
> Leon
>
> [...]
Re: [PATCH bpf-next v7 5/5] bpf: refactor kfunc checks using table-driven approach in verifier
Posted by Leon Hwang 4 weeks, 1 day ago
On 11/3/26 04:10, Kumar Kartikeya Dwivedi wrote:
> On Mon, 9 Mar 2026 at 07:45, Leon Hwang <leon.hwang@linux.dev> wrote:
>>
>> On 8/3/26 21:46, Chengkaitao wrote:
>>> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>>>
>>> Replace per-kfunc btf_id chains in list/rbtree/res_lock and graph node
>>> checks with btf_id_in_kfunc_table() and static kfunc tables for easier
>>> maintenance.
>>>
>>
>> Such refactoring should be the first patch? Less churn. Then, update the
>> list only.
>>
>> However, is_bpf_rbtree_api_kfunc(), is_bpf_res_spin_lock_kfunc(), and
>> BPF_RB_NODE should be excluded, because you didn't touch them in this
>> series.
> 
> I think moving clean up earlier makes some sense, but why exclude
> rbtree and res spin lock?
> Looks better to me to do them all.
> 

Okay, keeping them is acceptable to me.

Thanks,
Leon