[PATCH bpf-next 3/3] bpf: classify rbtree kfuncs with BPF_VERIF_KFUNC_DEF sets

Posted by Chengkaitao 3 days, 19 hours ago (patch 3 of a 3-patch series)
From: Kaitao Cheng <chengkaitao@kylinos.cn>

Remove the verifier logic that enumerates special_kfunc_list[KF_bpf_rbtree_*]
entries and compares func_ids against them. Use BPF_VERIF_KFUNC_DEF instead,
so each kfunc's verifier-facing safety classification is declared in one
place, next to its implementation.

When adding new kfuncs later, you no longer need to thread ad-hoc
allowlists through scattered verifier branches—tagging the kfunc once
with BPF_VERIF_KFUNC_DEF is enough.

This patch only migrates the rbtree kfuncs; other kfunc families can
be converted the same way in follow-up work.

Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
 include/asm-generic/btf_ids.lds.h |  5 +++-
 kernel/bpf/helpers.c              |  7 +++++
 kernel/bpf/verifier.c             | 44 +++++++++++--------------------
 3 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/include/asm-generic/btf_ids.lds.h b/include/asm-generic/btf_ids.lds.h
index 7579ba58f5ff..879391725d97 100644
--- a/include/asm-generic/btf_ids.lds.h
+++ b/include/asm-generic/btf_ids.lds.h
@@ -16,7 +16,10 @@
 		KEEP(*(.BTF_ids.##sfx))						\
 		__BTF_ids_seg_end_##sfx = .;
 
-#define BTF_IDS_VERIFIER_SUBSEGS
+#define BTF_IDS_VERIFIER_SUBSEGS						\
+	BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_add)				\
+	BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_graph_node)			\
+	BTF_IDS_SUBSEG(bpf_verif_kfunc_rbtree_api)
 
 #endif /* CONFIG_DEBUG_INFO_BTF */
 
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 2d8538bf4cfa..d4a8b0b98210 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2606,6 +2606,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
 	WRITE_ONCE(node_internal->owner, NULL);
 	return (struct bpf_rb_node *)n;
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_remove, rbtree_api, rbtree_graph_node)
 
 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
  * program
@@ -2667,6 +2668,7 @@ __bpf_kfunc int bpf_rbtree_add(struct bpf_rb_root *root,
 
 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_add, rbtree_api, rbtree_add, rbtree_graph_node)
 
 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
@@ -2674,6 +2676,7 @@ __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node
 {
 	return bpf_rbtree_add(root, node, less, meta__ign, off);
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_add_impl, rbtree_api, rbtree_add, rbtree_graph_node)
 
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
 {
@@ -2681,6 +2684,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
 
 	return (struct bpf_rb_node *)rb_first_cached(r);
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_first, rbtree_api)
 
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
 {
@@ -2688,6 +2692,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
 
 	return (struct bpf_rb_node *)r->rb_root.rb_node;
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_root, rbtree_api)
 
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
 {
@@ -2698,6 +2703,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct
 
 	return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_left, rbtree_api, rbtree_graph_node)
 
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
 {
@@ -2708,6 +2714,7 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struc
 
 	return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
 }
+BPF_VERIF_KFUNC_DEF(bpf_rbtree_right, rbtree_api, rbtree_graph_node)
 
 /**
  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8c1cf2eb6cbb..db9ba47903ef 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12714,6 +12714,10 @@ BTF_ID(func, bpf_session_is_return)
 BTF_ID(func, bpf_stream_vprintk)
 BTF_ID(func, bpf_stream_print_stack)
 
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_add)
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_graph_node)
+BTF_SET_SUB(bpf_verif_kfunc_rbtree_api)
+
 static bool is_bpf_obj_new_kfunc(u32 func_id)
 {
 	return func_id == special_kfunc_list[KF_bpf_obj_new] ||
@@ -12752,12 +12756,6 @@ static bool is_bpf_list_push_kfunc(u32 func_id)
 	       func_id == special_kfunc_list[KF_bpf_list_push_back_impl];
 }
 
-static bool is_bpf_rbtree_add_kfunc(u32 func_id)
-{
-	return func_id == special_kfunc_list[KF_bpf_rbtree_add] ||
-	       func_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
-}
-
 static bool is_task_work_add_kfunc(u32 func_id)
 {
 	return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
@@ -13162,16 +13160,6 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_list_back];
 }
 
-static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
-{
-	return is_bpf_rbtree_add_kfunc(btf_id) ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
-	       btf_id == special_kfunc_list[KF_bpf_rbtree_right];
-}
-
 static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
 {
 	return btf_id == special_kfunc_list[KF_bpf_iter_num_new] ||
@@ -13182,7 +13170,7 @@ static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
 static bool is_bpf_graph_api_kfunc(u32 btf_id)
 {
 	return is_bpf_list_api_kfunc(btf_id) ||
-	       is_bpf_rbtree_api_kfunc(btf_id) ||
+	       btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, btf_id) ||
 	       is_bpf_refcount_acquire_kfunc(btf_id);
 }
 
@@ -13216,7 +13204,7 @@ static bool kfunc_spin_allowed(u32 btf_id)
 
 static bool is_sync_callback_calling_kfunc(u32 btf_id)
 {
-	return is_bpf_rbtree_add_kfunc(btf_id);
+	return btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, btf_id);
 }
 
 static bool is_async_callback_calling_kfunc(u32 btf_id)
@@ -13244,7 +13232,7 @@ static bool is_callback_calling_kfunc(u32 btf_id)
 
 static bool is_rbtree_lock_required_kfunc(u32 btf_id)
 {
-	return is_bpf_rbtree_api_kfunc(btf_id);
+	return btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, btf_id);
 }
 
 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
@@ -13258,7 +13246,7 @@ static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
 		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
 		break;
 	case BPF_RB_ROOT:
-		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
+		ret = btf_id_set_contains(&bpf_verif_kfunc_rbtree_api, kfunc_btf_id);
 		break;
 	default:
 		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
@@ -13283,10 +13271,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 		ret = is_bpf_list_push_kfunc(kfunc_btf_id);
 		break;
 	case BPF_RB_NODE:
-		ret = (is_bpf_rbtree_add_kfunc(kfunc_btf_id) ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
+		ret = btf_id_set_contains(&bpf_verif_kfunc_rbtree_graph_node, kfunc_btf_id);
 		break;
 	default:
 		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
@@ -13823,7 +13808,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 				return ret;
 			break;
 		case KF_ARG_PTR_TO_RB_NODE:
-			if (is_bpf_rbtree_add_kfunc(meta->func_id)) {
+			if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta->func_id)) {
 				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
 					verbose(env, "arg#%d expected pointer to allocated object\n", i);
 					return -EINVAL;
@@ -14305,7 +14290,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	if (err < 0)
 		return err;
 
-	if (is_bpf_rbtree_add_kfunc(meta.func_id)) {
+	if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta.func_id)) {
 		err = push_callback_call(env, insn, insn_idx, meta.subprogno,
 					 set_rbtree_add_callback_state);
 		if (err) {
@@ -14409,7 +14394,8 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			return err;
 	}
 
-	if (is_bpf_list_push_kfunc(meta.func_id) || is_bpf_rbtree_add_kfunc(meta.func_id)) {
+	if (is_bpf_list_push_kfunc(meta.func_id) ||
+	    btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, meta.func_id)) {
 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
 		insn_aux->insert_off = regs[BPF_REG_2].var_off.value;
 		insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
@@ -23438,13 +23424,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		insn_buf[2] = *insn;
 		*cnt = 3;
 	} else if (is_bpf_list_push_kfunc(desc->func_id) ||
-		   is_bpf_rbtree_add_kfunc(desc->func_id)) {
+		   btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, desc->func_id)) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		int struct_meta_reg = BPF_REG_3;
 		int node_offset_reg = BPF_REG_4;
 
 		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
-		if (is_bpf_rbtree_add_kfunc(desc->func_id)) {
+		if (btf_id_set_contains(&bpf_verif_kfunc_rbtree_add, desc->func_id)) {
 			struct_meta_reg = BPF_REG_4;
 			node_offset_reg = BPF_REG_5;
 		}
-- 
2.43.0