From: Kaitao Cheng <chengkaitao@kylinos.cn>
Refactor __bpf_list_add to accept (new, head, prev_ptr, ..) instead of
(node, head, tail, ..), where prev_ptr is a struct list_head **. Load prev
from *prev_ptr after
INIT_LIST_HEAD(h), so we never dereference an uninitialized h->prev when
head was 0-initialized (e.g. push_back passes &h->prev).
When prev is not the list head, validate that prev is in the list via
its owner.
Prepares for bpf_list_add_impl(head, new, prev, ..) to insert after a
given list node.
Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
---
kernel/bpf/helpers.c | 44 ++++++++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index dac346eb1e2f..a9665f97b3bc 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2379,11 +2379,13 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
return (void *)p__refcounted_kptr;
}
-static int __bpf_list_add(struct bpf_list_node_kern *node,
+static int __bpf_list_add(struct bpf_list_node_kern *new,
struct bpf_list_head *head,
- bool tail, struct btf_record *rec, u64 off)
+ struct list_head **prev_ptr,
+ struct btf_record *rec, u64 off)
{
- struct list_head *n = &node->list_head, *h = (void *)head;
+ struct list_head *n = &new->list_head, *h = (void *)head;
+ struct list_head *prev;
/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
* called on its fields, so init here
@@ -2391,39 +2393,49 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
if (unlikely(!h->next))
INIT_LIST_HEAD(h);
- /* node->owner != NULL implies !list_empty(n), no need to separately
+ prev = *prev_ptr;
+
+ /* When prev is not the list head, it must be a node in this list. */
+ if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
+ prev, struct bpf_list_node_kern, list_head)->owner) != head))
+ goto fail;
+
+ /* new->owner != NULL implies !list_empty(n), no need to separately
* check the latter
*/
- if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
- /* Only called from BPF prog, no need to migrate_disable */
- __bpf_obj_drop_impl((void *)n - off, rec, false);
- return -EINVAL;
- }
-
- tail ? list_add_tail(n, h) : list_add(n, h);
- WRITE_ONCE(node->owner, head);
+ if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
+ goto fail;
+ list_add(n, prev);
+ WRITE_ONCE(new->owner, head);
return 0;
+
+fail:
+ /* Only called from BPF prog, no need to migrate_disable */
+ __bpf_obj_drop_impl((void *)n - off, rec, false);
+ return -EINVAL;
}
__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
{
- struct bpf_list_node_kern *n = (void *)node;
+ struct bpf_list_node_kern *new = (void *)node;
struct btf_struct_meta *meta = meta__ign;
+ struct list_head *h = (void *)head;
- return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
+ return __bpf_list_add(new, head, &h, meta ? meta->record : NULL, off);
}
__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
struct bpf_list_node *node,
void *meta__ign, u64 off)
{
- struct bpf_list_node_kern *n = (void *)node;
+ struct bpf_list_node_kern *new = (void *)node;
struct btf_struct_meta *meta = meta__ign;
+ struct list_head *h = (void *)head;
- return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
+ return __bpf_list_add(new, head, &h->prev, meta ? meta->record : NULL, off);
}
static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
--
2.50.1 (Apple Git-155)
On Mon Mar 16, 2026 at 7:28 AM EDT, Chengkaitao wrote:
> From: Kaitao Cheng <chengkaitao@kylinos.cn>
>
> Refactor __bpf_list_add to accept (new, head, struct list_head **prev_ptr,
> ..) instead of (node, head, bool tail, ..). Load prev from *prev_ptr after
> INIT_LIST_HEAD(h), so we never dereference an uninitialized h->prev when
> head was 0-initialized (e.g. push_back passes &h->prev).
>
> When prev is not the list head, validate that prev is in the list via
> its owner.
>
> Prepares for bpf_list_add_impl(head, new, prev, ..) to insert after a
> given list node.
>
> Signed-off-by: Kaitao Cheng <chengkaitao@kylinos.cn>
> ---
> kernel/bpf/helpers.c | 44 ++++++++++++++++++++++++++++----------------
> 1 file changed, 28 insertions(+), 16 deletions(-)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index dac346eb1e2f..a9665f97b3bc 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2379,11 +2379,13 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta
> return (void *)p__refcounted_kptr;
> }
>
> -static int __bpf_list_add(struct bpf_list_node_kern *node,
> +static int __bpf_list_add(struct bpf_list_node_kern *new,
> struct bpf_list_head *head,
> - bool tail, struct btf_record *rec, u64 off)
> + struct list_head **prev_ptr,
> + struct btf_record *rec, u64 off)
> {
> - struct list_head *n = &node->list_head, *h = (void *)head;
> + struct list_head *n = &new->list_head, *h = (void *)head;
> + struct list_head *prev;
>
> /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
> * called on its fields, so init here
> @@ -2391,39 +2393,49 @@ static int __bpf_list_add(struct bpf_list_node_kern *node,
> if (unlikely(!h->next))
> INIT_LIST_HEAD(h);
>
> - /* node->owner != NULL implies !list_empty(n), no need to separately
> + prev = *prev_ptr;
> +
> + /* When prev is not the list head, it must be a node in this list. */
> + if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
> + prev, struct bpf_list_node_kern, list_head)->owner) != head))
> + goto fail;
> +
This is pretty difficult to read; can you clean this up?
> + /* new->owner != NULL implies !list_empty(n), no need to separately
> * check the latter
> */
> - if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
> - /* Only called from BPF prog, no need to migrate_disable */
> - __bpf_obj_drop_impl((void *)n - off, rec, false);
> - return -EINVAL;
> - }
> -
> - tail ? list_add_tail(n, h) : list_add(n, h);
> - WRITE_ONCE(node->owner, head);
> + if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
> + goto fail;
>
> + list_add(n, prev);
> + WRITE_ONCE(new->owner, head);
> return 0;
> +
> +fail:
> + /* Only called from BPF prog, no need to migrate_disable */
> + __bpf_obj_drop_impl((void *)n - off, rec, false);
> + return -EINVAL;
> }
>
> __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
> struct bpf_list_node *node,
> void *meta__ign, u64 off)
> {
> - struct bpf_list_node_kern *n = (void *)node;
> + struct bpf_list_node_kern *new = (void *)node;
I don't think this rename or the one in __bpf_list_add is useful; the
renames also kind of obfuscate the point of the patch by accident, imo.
> struct btf_struct_meta *meta = meta__ign;
> + struct list_head *h = (void *)head;
>
> - return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
> + return __bpf_list_add(new, head, &h, meta ? meta->record : NULL, off);
> }
>
> __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
> struct bpf_list_node *node,
> void *meta__ign, u64 off)
> {
> - struct bpf_list_node_kern *n = (void *)node;
> + struct bpf_list_node_kern *new = (void *)node;
> struct btf_struct_meta *meta = meta__ign;
> + struct list_head *h = (void *)head;
>
> - return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
> + return __bpf_list_add(new, head, &h->prev, meta ? meta->record : NULL, off);
> }
>
> static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head,
© 2016 - 2026 Red Hat, Inc.