[PATCH bpf-next v7 2/5] bpf: Add bpf_list_add_impl to insert node after a given list node

Chengkaitao posted 5 patches 1 month ago
There is a newer version of this series
[PATCH bpf-next v7 2/5] bpf: Add bpf_list_add_impl to insert node after a given list node
Posted by Chengkaitao 1 month ago
From: Kaitao Cheng <chengkaitao@kylinos.cn>

Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
inserts 'new' after 'prev' in the BPF linked list. Both must be in
the same list; 'prev' must already be in the list. The new node must
be an owning reference (e.g. from bpf_obj_new); the kfunc consumes
that reference and the node becomes non-owning once inserted.

We have added an additional parameter bpf_list_head *head to
bpf_list_add_impl, as the verifier requires the head parameter to
check whether the lock is being held.

Returns 0 on success, -EINVAL if 'prev' is not in a list or 'new'
is already in a list (or duplicate insertion). On failure, the
kernel drops the passed-in node.

Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
 kernel/bpf/helpers.c  | 56 ++++++++++++++++++++++++++++++-------------
 kernel/bpf/verifier.c | 13 ++++++++--
 2 files changed, 50 insertions(+), 19 deletions(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 01b74c4ac00d..407520fde668 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2379,11 +2379,12 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
 	return (void *)p__refcounted_kptr;
 }
 
-static int __bpf_list_add(struct bpf_list_node_kern *node,
-			  struct bpf_list_head *head,
-			  bool tail, struct btf_record *rec, u64 off)
+static int __bpf_list_add(struct bpf_list_head *head,
+			  struct bpf_list_node_kern *new,
+			  struct list_head *prev,
+			  struct btf_record *rec, u64 off)
 {
-	struct list_head *n = &node->list_head, *h = (void *)head;
+	struct list_head *n = &new->list_head, *h = (void *)head;
 
 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
 	 * called on its fields, so init here
@@ -2391,39 +2392,59 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
 	if (unlikely(!h->next))
 		INIT_LIST_HEAD(h);
 
-	/* node->owner != NULL implies !list_empty(n), no need to separately
+	/* When prev is not the list head, it must be a node in this list. */
+	if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
+	    prev, struct bpf_list_node_kern, list_head)->owner) != head))
+		goto fail;
+
+	/* new->owner != NULL implies !list_empty(n), no need to separately
 	 * check the latter
 	 */
-	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
-		/* Only called from BPF prog, no need to migrate_disable */
-		__bpf_obj_drop_impl((void *)n - off, rec, false);
-		return -EINVAL;
-	}
-
-	tail ? list_add_tail(n, h) : list_add(n, h);
-	WRITE_ONCE(node->owner, head);
+	if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
+		goto fail;
 
+	list_add(n, prev);
+	WRITE_ONCE(new->owner, head);
 	return 0;
+
+fail:
+	/* Only called from BPF prog, no need to migrate_disable */
+	__bpf_obj_drop_impl((void *)n - off, rec, false);
+	return -EINVAL;
 }
 
 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
 					 struct bpf_list_node *node,
 					 void *meta__ign, u64 off)
 {
-	struct bpf_list_node_kern *n = (void *)node;
+	struct bpf_list_node_kern *new = (void *)node;
 	struct btf_struct_meta *meta = meta__ign;
+	struct list_head *h = (void *)head;
 
-	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
+	return __bpf_list_add(head, new, h, meta ? meta->record : NULL, off);
 }
 
 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
 					struct bpf_list_node *node,
 					void *meta__ign, u64 off)
 {
-	struct bpf_list_node_kern *n = (void *)node;
+	struct bpf_list_node_kern *new = (void *)node;
+	struct btf_struct_meta *meta = meta__ign;
+	struct list_head *h = (void *)head;
+
+	return __bpf_list_add(head, new, h->prev, meta ? meta->record : NULL, off);
+}
+
+__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
+				  struct bpf_list_node *new,
+				  struct bpf_list_node *prev,
+				  void *meta__ign, u64 off)
+{
+	struct bpf_list_node_kern *kn = (void *)new, *kp = (void *)prev;
 	struct btf_struct_meta *meta = meta__ign;
 
-	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
+	return __bpf_list_add(head, kn, &kp->list_head,
+			      meta ? meta->record : NULL, off);
 }
 
 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
@@ -4563,6 +4584,7 @@ BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_add_impl)
 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c9557d3fb8dd..5f55b68ed935 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12459,6 +12459,7 @@ enum special_kfunc_type {
 	KF_bpf_refcount_acquire_impl,
 	KF_bpf_list_push_front_impl,
 	KF_bpf_list_push_back_impl,
+	KF_bpf_list_add_impl,
 	KF_bpf_list_pop_front,
 	KF_bpf_list_pop_back,
 	KF_bpf_list_del,
@@ -12520,6 +12521,7 @@ BTF_ID(func, bpf_obj_drop_impl)
 BTF_ID(func, bpf_refcount_acquire_impl)
 BTF_ID(func, bpf_list_push_front_impl)
 BTF_ID(func, bpf_list_push_back_impl)
+BTF_ID(func, bpf_list_add_impl)
 BTF_ID(func, bpf_list_pop_front)
 BTF_ID(func, bpf_list_pop_back)
 BTF_ID(func, bpf_list_del)
@@ -12996,6 +12998,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
 {
 	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
@@ -13122,6 +13125,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 	case BPF_LIST_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
 		break;
 	case BPF_RB_NODE:
@@ -14264,6 +14268,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+	    meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
 		insn_aux->insert_off = regs[BPF_REG_2].off;
@@ -23230,13 +23235,17 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		*cnt = 3;
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
+		   desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		int struct_meta_reg = BPF_REG_3;
 		int node_offset_reg = BPF_REG_4;
 
-		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
-		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+		/* list/rbtree_add_impl have an extra arg (prev/less),
+		 * so args-to-fixup are in different regs.
+		 */
+		if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
+		    desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 			struct_meta_reg = BPF_REG_4;
 			node_offset_reg = BPF_REG_5;
 		}
-- 
2.50.1 (Apple Git-155)
Re: [PATCH bpf-next v7 2/5] bpf: Add bpf_list_add_impl to insert node after a given list node
Posted by Kumar Kartikeya Dwivedi 4 weeks, 1 day ago
On Sun, 8 Mar 2026 at 14:47, Chengkaitao <pilgrimtao@gmail.com> wrote:
>
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>
> Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
> inserts 'new' after 'prev' in the BPF linked list. Both must be in
> the same list; 'prev' must already be in the list. The new node must
> be an owning reference (e.g. from bpf_obj_new); the kfunc consumes
> that reference and the node becomes non-owning once inserted.
>
> We have added an additional parameter bpf_list_head *head to
> bpf_list_add_impl, as the verifier requires the head parameter to
> check whether the lock is being held.
>
> Returns 0 on success, -EINVAL if 'prev' is not in a list or 'new'
> is already in a list (or duplicate insertion). On failure, the
> kernel drops the passed-in node.
>
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
>  kernel/bpf/helpers.c  | 56 ++++++++++++++++++++++++++++++-------------
>  kernel/bpf/verifier.c | 13 ++++++++--
>  2 files changed, 50 insertions(+), 19 deletions(-)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 01b74c4ac00d..407520fde668 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2379,11 +2379,12 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
>         return (void *)p__refcounted_kptr;
>  }
>
> -static int __bpf_list_add(struct bpf_list_node_kern *node,
> -                         struct bpf_list_head *head,
> -                         bool tail, struct btf_record *rec, u64 off)
> +static int __bpf_list_add(struct bpf_list_head *head,
> +                         struct bpf_list_node_kern *new,
> +                         struct list_head *prev,
> +                         struct btf_record *rec, u64 off)
>  {
> -       struct list_head *n = &node->list_head, *h = (void *)head;
> +       struct list_head *n = &new->list_head, *h = (void *)head;
>
>         /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
>          * called on its fields, so init here
> @@ -2391,39 +2392,59 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
>         if (unlikely(!h->next))
>                 INIT_LIST_HEAD(h);
>
> -       /* node->owner != NULL implies !list_empty(n), no need to separately
> +       /* When prev is not the list head, it must be a node in this list. */
> +       if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
> +           prev, struct bpf_list_node_kern, list_head)->owner) != head))
> +               goto fail;

There is a slight issue here: if head is not initialized, prev will be
NULL at this point, since bpf_list_push_back_impl passes h->prev.
So we'll do a bad dereference. I think we should probably pass a pointer
to prev (list_head **) and then load it after INIT_LIST_HEAD(h) is done.
prev != h check looks ok (since we want to establish that prev is not
a node) otherwise.

Probably also add a test for such a case to catch this sort of bug.
You can see whether it crashes without changing your patch, and
doesn't with the fix.

> +
> +       /* new->owner != NULL implies !list_empty(n), no need to separately
>          * check the latter
>          */
> -       if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
> -               /* Only called from BPF prog, no need to migrate_disable */
> -               __bpf_obj_drop_impl((void *)n - off, rec, false);
> -               return -EINVAL;
> -       }
> -
> -       tail ? list_add_tail(n, h) : list_add(n, h);
> -       WRITE_ONCE(node->owner, head);
> +       if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
> +               goto fail;
>
> +       list_add(n, prev);
> +       WRITE_ONCE(new->owner, head);
>         return 0;
> +
> +fail:
> +       /* Only called from BPF prog, no need to migrate_disable */
> +       __bpf_obj_drop_impl((void *)n - off, rec, false);
> +       return -EINVAL;
>  }
>
> [...]
> @@ -12996,6 +12998,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
>  {
>         return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>                btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +              btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>                btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
>                btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
>                btf_id == special_kfunc_list[KF_bpf_list_del] ||
> @@ -13122,6 +13125,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
>         case BPF_LIST_NODE:
>                 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>                        kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +                      kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>                        kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
>                 break;
>         case BPF_RB_NODE:
> @@ -14264,6 +14268,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>
>         if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>             meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +           meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>             meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>                 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
>                 insn_aux->insert_off = regs[BPF_REG_2].off;

Please rebase patches on bpf-next/master always before sending, this
one didn't apply cleanly.


> @@ -23230,13 +23235,17 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>                 *cnt = 3;
>         } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
>                    desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> +                  desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>                    desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>                 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
>                 int struct_meta_reg = BPF_REG_3;
>                 int node_offset_reg = BPF_REG_4;
>
> -               /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
> -               if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> +               /* list/rbtree_add_impl have an extra arg (prev/less),
> +                * so args-to-fixup are in different regs.
> +                */
> +               if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> +                   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>                         struct_meta_reg = BPF_REG_4;
>                         node_offset_reg = BPF_REG_5;
>                 }
> --
> 2.50.1 (Apple Git-155)
>
>
Re: [PATCH bpf-next v7 2/5] bpf: Add bpf_list_add_impl to insert node after a given list node
Posted by Leon Hwang 1 month ago
On 8/3/26 21:46, Chengkaitao wrote:
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
> 
> Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
> inserts 'new' after 'prev' in the BPF linked list. Both must be in
> the same list; 'prev' must already be in the list. The new node must
> be an owning reference (e.g. from bpf_obj_new); the kfunc consumes
> that reference and the node becomes non-owning once inserted.
> 
> We have added an additional parameter bpf_list_head *head to
> bpf_list_add_impl, as the verifier requires the head parameter to
> check whether the lock is being held.
> 
> Returns 0 on success, -EINVAL if 'prev' is not in a list or 'new'
> is already in a list (or duplicate insertion). On failure, the
> kernel drops the passed-in node.
> 
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
>  kernel/bpf/helpers.c  | 56 ++++++++++++++++++++++++++++++-------------
>  kernel/bpf/verifier.c | 13 ++++++++--
>  2 files changed, 50 insertions(+), 19 deletions(-)
> 
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 01b74c4ac00d..407520fde668 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2379,11 +2379,12 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
>  	return (void *)p__refcounted_kptr;
>  }
>  
> -static int __bpf_list_add(struct bpf_list_node_kern *node,
> -			  struct bpf_list_head *head,
> -			  bool tail, struct btf_record *rec, u64 off)
> +static int __bpf_list_add(struct bpf_list_head *head,
> +			  struct bpf_list_node_kern *new,
> +			  struct list_head *prev,
> +			  struct btf_record *rec, u64 off)
>  {
> -	struct list_head *n = &node->list_head, *h = (void *)head;
> +	struct list_head *n = &new->list_head, *h = (void *)head;
>  
>  	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
>  	 * called on its fields, so init here
> @@ -2391,39 +2392,59 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
>  	if (unlikely(!h->next))
>  		INIT_LIST_HEAD(h);
>  
> -	/* node->owner != NULL implies !list_empty(n), no need to separately
> +	/* When prev is not the list head, it must be a node in this list. */
> +	if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
> +	    prev, struct bpf_list_node_kern, list_head)->owner) != head))
> +		goto fail;
> +
> +	/* new->owner != NULL implies !list_empty(n), no need to separately
>  	 * check the latter
>  	 */
> -	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
> -		/* Only called from BPF prog, no need to migrate_disable */
> -		__bpf_obj_drop_impl((void *)n - off, rec, false);
> -		return -EINVAL;
> -	}
> -
> -	tail ? list_add_tail(n, h) : list_add(n, h);
> -	WRITE_ONCE(node->owner, head);
> +	if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
> +		goto fail;
>  
> +	list_add(n, prev);
> +	WRITE_ONCE(new->owner, head);
>  	return 0;
> +
> +fail:
> +	/* Only called from BPF prog, no need to migrate_disable */
> +	__bpf_obj_drop_impl((void *)n - off, rec, false);
> +	return -EINVAL;
>  }
>  

This refactoring is worthwhile, but it should be a preparatory patch.

>  __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
>  					 struct bpf_list_node *node,
>  					 void *meta__ign, u64 off)
>  {
> -	struct bpf_list_node_kern *n = (void *)node;
> +	struct bpf_list_node_kern *new = (void *)node;
>  	struct btf_struct_meta *meta = meta__ign;
> +	struct list_head *h = (void *)head;
>  
> -	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
> +	return __bpf_list_add(head, new, h, meta ? meta->record : NULL, off);

Strange to change the positions of the first two args.

Thanks,
Leon

>  }
>  
>  __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
>  					struct bpf_list_node *node,
>  					void *meta__ign, u64 off)
>  {
> -	struct bpf_list_node_kern *n = (void *)node;
> +	struct bpf_list_node_kern *new = (void *)node;
> +	struct btf_struct_meta *meta = meta__ign;
> +	struct list_head *h = (void *)head;
> +
> +	return __bpf_list_add(head, new, h->prev, meta ? meta->record : NULL, off);
> +}
> +
> +__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
> +				  struct bpf_list_node *new,
> +				  struct bpf_list_node *prev,
> +				  void *meta__ign, u64 off)
> +{
> +	struct bpf_list_node_kern *kn = (void *)new, *kp = (void *)prev;
>  	struct btf_struct_meta *meta = meta__ign;
>  
> -	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
> +	return __bpf_list_add(head, kn, &kp->list_head,
> +			      meta ? meta->record : NULL, off);
>  }
>  
>  static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
> @@ -4563,6 +4584,7 @@ BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
> +BTF_ID_FLAGS(func, bpf_list_add_impl)
>  BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
>  BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index c9557d3fb8dd..5f55b68ed935 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -12459,6 +12459,7 @@ enum special_kfunc_type {
>  	KF_bpf_refcount_acquire_impl,
>  	KF_bpf_list_push_front_impl,
>  	KF_bpf_list_push_back_impl,
> +	KF_bpf_list_add_impl,
>  	KF_bpf_list_pop_front,
>  	KF_bpf_list_pop_back,
>  	KF_bpf_list_del,
> @@ -12520,6 +12521,7 @@ BTF_ID(func, bpf_obj_drop_impl)
>  BTF_ID(func, bpf_refcount_acquire_impl)
>  BTF_ID(func, bpf_list_push_front_impl)
>  BTF_ID(func, bpf_list_push_back_impl)
> +BTF_ID(func, bpf_list_add_impl)
>  BTF_ID(func, bpf_list_pop_front)
>  BTF_ID(func, bpf_list_pop_back)
>  BTF_ID(func, bpf_list_del)
> @@ -12996,6 +12998,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
>  {
>  	return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>  	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +	       btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>  	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
>  	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
>  	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
> @@ -13122,6 +13125,7 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
>  	case BPF_LIST_NODE:
>  		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>  		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>  		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
>  		break;
>  	case BPF_RB_NODE:
> @@ -14264,6 +14268,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  
>  	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
>  	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> +	    meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>  	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>  		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
>  		insn_aux->insert_off = regs[BPF_REG_2].off;
> @@ -23230,13 +23235,17 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  		*cnt = 3;
>  	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
>  		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> +		   desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
>  		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>  		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
>  		int struct_meta_reg = BPF_REG_3;
>  		int node_offset_reg = BPF_REG_4;
>  
> -		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
> -		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> +		/* list/rbtree_add_impl have an extra arg (prev/less),
> +		 * so args-to-fixup are in different regs.
> +		 */
> +		if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> +		    desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
>  			struct_meta_reg = BPF_REG_4;
>  			node_offset_reg = BPF_REG_5;
>  		}
Re: [PATCH bpf-next v7 2/5] bpf: Add bpf_list_add_impl to insert node after a given list node
Posted by bot+bpf-ci@kernel.org 1 month ago
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c

[ ... ]

> +__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
> +				  struct bpf_list_node *new,
> +				  struct bpf_list_node *prev,
> +				  void *meta__ign, u64 off)
> +{
> +	struct bpf_list_node_kern *kn = (void *)new, *kp = (void *)prev;
> +	struct btf_struct_meta *meta = meta__ign;
> +
> +	return __bpf_list_add(head, kn, &kp->list_head,
> +			      meta ? meta->record : NULL, off);
> +}

The prev argument here is typed as struct bpf_list_node *, so the
verifier classifies it as KF_ARG_PTR_TO_LIST_NODE.  In
check_kfunc_args(), the KF_ARG_PTR_TO_LIST_NODE case requires:

    if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
        ...
        return -EINVAL;
    }
    if (!reg->ref_obj_id) {
        ...
        return -EINVAL;
    }

This means prev must be an owning reference.  However, the natural
way to obtain a reference to a node already in the list is via
bpf_list_front() or bpf_list_back(), which return non-owning
references (PTR_TO_BTF_ID with NON_OWN_REF set via
ref_set_non_owning).  These will not pass the check above.

Compare with KF_ARG_PTR_TO_RB_NODE, which differentiates between
insertion (requires owning ref) and other operations (allows
non-owning refs):

    case KF_ARG_PTR_TO_RB_NODE:
        if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
            /* owning ref required */
        } else {
            /* non-owning or refcounted allowed */
        }

Should KF_ARG_PTR_TO_LIST_NODE have a similar distinction, so that
for bpf_list_add_impl only the new argument (R2) requires an owning
reference while prev (R3) can accept non-owning references?

Without this, the API only works when prev is an owning reference to
a refcounted object still in the list (e.g. obtained from
bpf_rbtree_remove on a node that is in both a tree and a list).
This is the pattern used in the selftests, but it limits the kfunc
to a narrow use case that the commit message does not describe.

This is not addressed in any later commit in the series (checked
through 4a7e9776ee29 "bpf: refactor kfunc checks using table-driven
approach in verifier").


---
AI reviewed your patch. Please fix the bug, or reply by email explaining why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/22822524869