Implement bpf_task_work_schedule_* with an implicit bpf_prog_aux
argument, and change corresponding _impl funcs to call the new kfunc.
Update special kfunc checks in the verifier to accept both new and old
variants of the functions.
Update the selftests to use the new API with implicit argument.
Signed-off-by: Ihor Solodrai <ihor.solodrai@linux.dev>
---
kernel/bpf/helpers.c | 28 +++++++++++++++----
kernel/bpf/verifier.c | 8 +++++-
.../testing/selftests/bpf/progs/file_reader.c | 4 ++-
tools/testing/selftests/bpf/progs/task_work.c | 11 ++++++--
.../selftests/bpf/progs/task_work_fail.c | 16 ++++++++---
.../selftests/bpf/progs/task_work_stress.c | 5 ++--
.../bpf/progs/verifier_async_cb_context.c | 6 ++--
7 files changed, 59 insertions(+), 19 deletions(-)
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 23aa785c0f99..2e01973f2c18 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4282,41 +4282,55 @@ static int bpf_task_work_schedule(struct task_struct *task, struct bpf_task_work
}
/**
- * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
* @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
+__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
+ void *map__map, bpf_task_work_callback_t callback,
+ struct bpf_prog_aux *aux)
+{
+ return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
+}
+
__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
struct bpf_task_work *tw, void *map__map,
bpf_task_work_callback_t callback,
void *aux__prog)
{
- return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
+ return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
}
/**
- * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME
* mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
* @callback: pointer to BPF subprogram to call
- * @aux__prog: user should pass NULL
+ * @aux: pointer to bpf_prog_aux of the caller BPF program, implicitly set by the verifier
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
+__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
+ void *map__map, bpf_task_work_callback_t callback,
+ struct bpf_prog_aux *aux)
+{
+ return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_RESUME);
+}
+
__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
struct bpf_task_work *tw, void *map__map,
bpf_task_work_callback_t callback,
void *aux__prog)
{
- return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
+ return bpf_task_work_schedule_resume(task, tw, map__map, callback, aux__prog);
}
static int make_file_dynptr(struct file *file, u32 flags, bool may_sleep,
@@ -4545,7 +4559,9 @@ BTF_ID_FLAGS(func, bpf_strncasestr);
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_stream_vprintk_impl)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl)
BTF_ID_FLAGS(func, bpf_dynptr_from_file)
BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e5aeee554377..fdd17d19d5be 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12471,6 +12471,8 @@ enum special_kfunc_type {
KF_bpf_arena_free_pages,
KF_bpf_arena_reserve_pages,
KF_bpf_wq_set_callback,
+ KF_bpf_task_work_schedule_signal,
+ KF_bpf_task_work_schedule_resume,
};
BTF_ID_LIST(special_kfunc_list)
@@ -12549,10 +12551,14 @@ BTF_ID(func, bpf_arena_alloc_pages)
BTF_ID(func, bpf_arena_free_pages)
BTF_ID(func, bpf_arena_reserve_pages)
BTF_ID(func, bpf_wq_set_callback)
+BTF_ID(func, bpf_task_work_schedule_signal)
+BTF_ID(func, bpf_task_work_schedule_resume)
static bool is_task_work_add_kfunc(u32 func_id)
{
- return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
+ return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
+ func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
+ func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume] ||
func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
}
diff --git a/tools/testing/selftests/bpf/progs/file_reader.c b/tools/testing/selftests/bpf/progs/file_reader.c
index 4d756b623557..ff3270a0cb9b 100644
--- a/tools/testing/selftests/bpf/progs/file_reader.c
+++ b/tools/testing/selftests/bpf/progs/file_reader.c
@@ -77,7 +77,9 @@ int on_open_validate_file_read(void *c)
err = 1;
return 0;
}
- bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, task_work_callback, NULL);
+
+ bpf_task_work_schedule_signal(task, &work->tw, &arrmap, task_work_callback);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work.c b/tools/testing/selftests/bpf/progs/task_work.c
index 663a80990f8f..eec422af20b8 100644
--- a/tools/testing/selftests/bpf/progs/task_work.c
+++ b/tools/testing/selftests/bpf/progs/task_work.c
@@ -66,7 +66,8 @@ int oncpu_hash_map(struct pt_regs *args)
if (!work)
return 0;
- bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+ bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
+
return 0;
}
@@ -80,7 +81,9 @@ int oncpu_array_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
+
+ bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work);
+
return 0;
}
@@ -102,6 +105,8 @@ int oncpu_lru_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&lrumap, &key);
if (!work || work->data[0])
return 0;
- bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
+
+ bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work_fail.c b/tools/testing/selftests/bpf/progs/task_work_fail.c
index 1270953fd092..557bdf9eb0fc 100644
--- a/tools/testing/selftests/bpf/progs/task_work_fail.c
+++ b/tools/testing/selftests/bpf/progs/task_work_fail.c
@@ -53,7 +53,9 @@ int mismatch_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
+
+ bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work);
+
return 0;
}
@@ -65,7 +67,9 @@ int no_map_task_work(struct pt_regs *args)
struct bpf_task_work tw;
task = bpf_get_current_task_btf();
- bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
+
+ bpf_task_work_schedule_resume(task, &tw, &hmap, process_work);
+
return 0;
}
@@ -76,7 +80,9 @@ int task_work_null(struct pt_regs *args)
struct task_struct *task;
task = bpf_get_current_task_btf();
- bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
+
+ bpf_task_work_schedule_resume(task, NULL, &hmap, process_work);
+
return 0;
}
@@ -91,6 +97,8 @@ int map_null(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
- bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
+
+ bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work);
+
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c
index 55e555f7f41b..0cba36569714 100644
--- a/tools/testing/selftests/bpf/progs/task_work_stress.c
+++ b/tools/testing/selftests/bpf/progs/task_work_stress.c
@@ -51,8 +51,9 @@ int schedule_task_work(void *ctx)
if (!work)
return 0;
}
- err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
- process_work, NULL);
+ err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
+ process_work);
+
if (err)
__sync_fetch_and_add(&schedule_error, 1);
else
diff --git a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
index 7efa9521105e..6c50aff03baa 100644
--- a/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
+++ b/tools/testing/selftests/bpf/progs/verifier_async_cb_context.c
@@ -156,7 +156,8 @@ int task_work_non_sleepable_prog(void *ctx)
if (!task)
return 0;
- bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+ bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
+
return 0;
}
@@ -176,6 +177,7 @@ int task_work_sleepable_prog(void *ctx)
if (!task)
return 0;
- bpf_task_work_schedule_resume_impl(task, &val->tw, &task_work_map, task_work_cb, NULL);
+ bpf_task_work_schedule_resume(task, &val->tw, &task_work_map, task_work_cb);
+
return 0;
}
--
2.52.0
On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
> + void *map__map, bpf_task_work_callback_t callback,
> + struct bpf_prog_aux *aux)
> +{
> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
> +}
> +
> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
> struct bpf_task_work *tw, void *map__map,
> bpf_task_work_callback_t callback,
> void *aux__prog)
> {
> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
> }
I thought we decided that _impl() will not be marked as __bpf_kfunc
and will not be in BTF_ID(func, _impl).
We can mark it as __weak noinline and it will be in kallsyms.
That's all we need for the verifier and resolve_btfid, no?
Sorry, it's been a long time. I must have forgotten something.
On 1/9/26 11:58 AM, Alexei Starovoitov wrote:
> On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>
>> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
>> + void *map__map, bpf_task_work_callback_t callback,
>> + struct bpf_prog_aux *aux)
>> +{
>> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
>> +}
>> +
>> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
>> struct bpf_task_work *tw, void *map__map,
>> bpf_task_work_callback_t callback,
>> void *aux__prog)
>> {
>> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
>> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
>> }
>
> I thought we decided that _impl() will not be marked as __bpf_kfunc
> and will not be in BTF_ID(func, _impl).
> We can mark it as __weak noinline and it will be in kallsyms.
> That's all we need for the verifier and resolve_btfid, no?
>
> Sorry, it's been a long time. I must have forgotten something.
For the *generated* _impl kfuncs there are no decl tags and the ids are
absent from BTF_ID sets, yes.
However for the "legacy" cases it must be there for backwards
compatibility, as well as relevant verifier checks.
On Fri, Jan 9, 2026 at 12:02 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
> On 1/9/26 11:58 AM, Alexei Starovoitov wrote:
> > On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
> >>
> >> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
> >> + void *map__map, bpf_task_work_callback_t callback,
> >> + struct bpf_prog_aux *aux)
> >> +{
> >> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
> >> +}
> >> +
> >> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
> >> struct bpf_task_work *tw, void *map__map,
> >> bpf_task_work_callback_t callback,
> >> void *aux__prog)
> >> {
> >> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
> >> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
> >> }
> >
> > I thought we decided that _impl() will not be marked as __bpf_kfunc
> > and will not be in BTF_ID(func, _impl).
> > We can mark it as __weak noinline and it will be in kallsyms.
> > That's all we need for the verifier and resolve_btfid, no?
> >
> > Sorry, it's been a long time. I must have forgotten something.
>
> For the *generated* _impl kfuncs there is no decl tags and the ids are
> absent from BTF_ID sets, yes.
>
> However for the "legacy" cases it must be there for backwards
> compatibility, as well as relevant verifier checks.
I see.
I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
We can remove bpf_task_work_schedule_[resume|signal]_impl()
to avoid carrying forward forever.
bpf_stream_vprintk_impl() is not that clear. I would remove it too.
On 1/9/26 12:47 PM, Alexei Starovoitov wrote:
> On Fri, Jan 9, 2026 at 12:02 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>
>> On 1/9/26 11:58 AM, Alexei Starovoitov wrote:
>>> On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>>>
>>>> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
>>>> + void *map__map, bpf_task_work_callback_t callback,
>>>> + struct bpf_prog_aux *aux)
>>>> +{
>>>> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
>>>> +}
>>>> +
>>>> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
>>>> struct bpf_task_work *tw, void *map__map,
>>>> bpf_task_work_callback_t callback,
>>>> void *aux__prog)
>>>> {
>>>> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
>>>> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
>>>> }
>>>
>>> I thought we decided that _impl() will not be marked as __bpf_kfunc
>>> and will not be in BTF_ID(func, _impl).
>>> We can mark it as __weak noinline and it will be in kallsyms.
>>> That's all we need for the verifier and resolve_btfid, no?
>>>
>>> Sorry, it's been a long time. I must have forgotten something.
>>
>> For the *generated* _impl kfuncs there is no decl tags and the ids are
>> absent from BTF_ID sets, yes.
>>
>> However for the "legacy" cases it must be there for backwards
>> compatibility, as well as relevant verifier checks.
>
> I see.
> I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
> We can remove bpf_task_work_schedule_[resume|singal]_impl()
> to avoid carrying forward forever.
>
> bpf_stream_vprintk_impl() is not that clear. I would remove it too.
That leaves only bpf_wq_set_callback_impl(). Can we break that too?
Then there won't be legacy cases at all. It was introduced in v6.16
along with the __prog suffix [1][2].
If we go this route, we could clean up __prog support/docs too.
I think it's worth it to make an "all or nothing" decision here:
either break all 4 existing kfuncs, or backwards-support all of them.
git tag --contains bc049387b41f | grep -v rc
v6.16
v6.17
v6.18
[1] https://lore.kernel.org/all/20250513142812.1021591-1-memxor@gmail.com/
[2] https://lore.kernel.org/all/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org/
On Fri, Jan 9, 2026 at 1:39 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
> On 1/9/26 12:47 PM, Alexei Starovoitov wrote:
> > On Fri, Jan 9, 2026 at 12:02 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
> >>
> >> On 1/9/26 11:58 AM, Alexei Starovoitov wrote:
> >>> On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
> >>>>
> >>>> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
> >>>> + void *map__map, bpf_task_work_callback_t callback,
> >>>> + struct bpf_prog_aux *aux)
> >>>> +{
> >>>> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
> >>>> +}
> >>>> +
> >>>> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
> >>>> struct bpf_task_work *tw, void *map__map,
> >>>> bpf_task_work_callback_t callback,
> >>>> void *aux__prog)
> >>>> {
> >>>> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
> >>>> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
> >>>> }
> >>>
> >>> I thought we decided that _impl() will not be marked as __bpf_kfunc
> >>> and will not be in BTF_ID(func, _impl).
> >>> We can mark it as __weak noinline and it will be in kallsyms.
> >>> That's all we need for the verifier and resolve_btfid, no?
> >>>
> >>> Sorry, it's been a long time. I must have forgotten something.
> >>
> >> For the *generated* _impl kfuncs there is no decl tags and the ids are
> >> absent from BTF_ID sets, yes.
> >>
> >> However for the "legacy" cases it must be there for backwards
> >> compatibility, as well as relevant verifier checks.
> >
> > I see.
> > I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
> > We can remove bpf_task_work_schedule_[resume|singal]_impl()
> > to avoid carrying forward forever.
> >
> > bpf_stream_vprintk_impl() is not that clear. I would remove it too.
>
> That leaves only bpf_wq_set_callback_impl(). Can we break that too?
Sounds like Benjamin is ok removing it.
So I think we can indeed remove them all.
> Then there won't be legacy cases at all. It was introduced in v6.16
> along the with __prog suffix [1][2].
>
> If we go this route, we could clean up __prog support/docs too.
>
> I think it's worth it to make an "all or nothing" decision here:
> either break all 4 existing kfuncs, or backwards-support all of them.
I don't see why "all or nothing" is a good thing.
It won't be "all" anyway.
We have bpf_rbtree_add_impl(), bpf_list_push_front_impl(), etc.
And those we cannot remove. sched-ext is using them.
Another few categories are bpf_obj_new_impl(), bpf_obj_drop_impl().
They are not __prog type, but conceptually the same thing and
KF_IMPLICIT_ARGS should support them too eventually.
> git tag --contains bc049387b41f | grep -v rc
> v6.16
> v6.17
> v6.18
>
> [1] https://lore.kernel.org/all/20250513142812.1021591-1-memxor@gmail.com/
> [2] https://lore.kernel.org/all/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org/
>
>
On 1/9/26 1:49 PM, Alexei Starovoitov wrote:
> On Fri, Jan 9, 2026 at 1:39 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>
>> On 1/9/26 12:47 PM, Alexei Starovoitov wrote:
>>> On Fri, Jan 9, 2026 at 12:02 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>>>
>>>> On 1/9/26 11:58 AM, Alexei Starovoitov wrote:
>>>>> On Fri, Jan 9, 2026 at 10:50 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>>>>>
>>>>>> +__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
>>>>>> + void *map__map, bpf_task_work_callback_t callback,
>>>>>> + struct bpf_prog_aux *aux)
>>>>>> +{
>>>>>> + return bpf_task_work_schedule(task, tw, map__map, callback, aux, TWA_SIGNAL);
>>>>>> +}
>>>>>> +
>>>>>> __bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
>>>>>> struct bpf_task_work *tw, void *map__map,
>>>>>> bpf_task_work_callback_t callback,
>>>>>> void *aux__prog)
>>>>>> {
>>>>>> - return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
>>>>>> + return bpf_task_work_schedule_signal(task, tw, map__map, callback, aux__prog);
>>>>>> }
>>>>>
>>>>> I thought we decided that _impl() will not be marked as __bpf_kfunc
>>>>> and will not be in BTF_ID(func, _impl).
>>>>> We can mark it as __weak noinline and it will be in kallsyms.
>>>>> That's all we need for the verifier and resolve_btfid, no?
>>>>>
>>>>> Sorry, it's been a long time. I must have forgotten something.
>>>>
>>>> For the *generated* _impl kfuncs there is no decl tags and the ids are
>>>> absent from BTF_ID sets, yes.
>>>>
>>>> However for the "legacy" cases it must be there for backwards
>>>> compatibility, as well as relevant verifier checks.
>>>
>>> I see.
>>> I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
>>> We can remove bpf_task_work_schedule_[resume|singal]_impl()
>>> to avoid carrying forward forever.
>>>
>>> bpf_stream_vprintk_impl() is not that clear. I would remove it too.
>>
>> That leaves only bpf_wq_set_callback_impl(). Can we break that too?
>
> Sounds like Benjamin is ok removing it.
> So I think we can indeed remove them all.
>
>> Then there won't be legacy cases at all. It was introduced in v6.16
>> along the with __prog suffix [1][2].
>>
>> If we go this route, we could clean up __prog support/docs too.
>>
>> I think it's worth it to make an "all or nothing" decision here:
>> either break all 4 existing kfuncs, or backwards-support all of them.
>
> I don't see why "all or nothing" is a good thing.
> It won't be "all" anyway.
> We have bpf_rbtree_add_impl(), bpf_list_push_front_impl(), etc.
> And those we cannot remove. sched-ext is using them.
> Another few categories are bpf_obj_new_impl(), bpf_obj_drop_impl().
> There are not __prog type, but conceptually the same thing and
> KF_IMPLICIT_ARGS should support them too eventually.
I was thinking we could remove/simplify code relevant to backwards
compat of existing _impl kfuncs. But you're right, if we start using
implicit args for other types/kfuncs, the "legacy" case still has to
work.
Ok, in the next revision I'll remove all the __prog users, but leave
the "legacy" case support in place for future use.
>
>
>> git tag --contains bc049387b41f | grep -v rc
>> v6.16
>> v6.17
>> v6.18
>>
>> [1] https://lore.kernel.org/all/20250513142812.1021591-1-memxor@gmail.com/
>> [2] https://lore.kernel.org/all/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org/
>>
>>
On 1/9/26 1:56 PM, Ihor Solodrai wrote:
> On 1/9/26 1:49 PM, Alexei Starovoitov wrote:
>> On Fri, Jan 9, 2026 at 1:39 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>>>
>>> [...]
>>>
>>>> I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
>>>> We can remove bpf_task_work_schedule_[resume|singal]_impl()
>>>> to avoid carrying forward forever.
>>>>
>>>> bpf_stream_vprintk_impl() is not that clear. I would remove it too.
>>>
>>> That leaves only bpf_wq_set_callback_impl(). Can we break that too?
>>
>> Sounds like Benjamin is ok removing it.
>> So I think we can indeed remove them all.
>>
>>> Then there won't be legacy cases at all. It was introduced in v6.16
>>> along the with __prog suffix [1][2].
>>>
>>> If we go this route, we could clean up __prog support/docs too.
>>>
>>> I think it's worth it to make an "all or nothing" decision here:
>>> either break all 4 existing kfuncs, or backwards-support all of them.
>>
>> I don't see why "all or nothing" is a good thing.
>> It won't be "all" anyway.
>> We have bpf_rbtree_add_impl(), bpf_list_push_front_impl(), etc.
>> And those we cannot remove. sched-ext is using them.
>> Another few categories are bpf_obj_new_impl(), bpf_obj_drop_impl().
>> There are not __prog type, but conceptually the same thing and
>> KF_IMPLICIT_ARGS should support them too eventually.
>
> I was thinking we could remove/simplify code relevant to backwards
> compat of existing _impl kfuncs. But you're right, if we start using
> implicit args for other types/kfuncs, the "legacy" case still has to
> work.
>
> Ok, in the next revision I'll remove all the __prog users, but leave
> the "legacy" case support in place for future use.

I just had an off-list chat with Andrii, and we agreed that leaving
the existing _impl kfuncs supported may be a good idea.

It doesn't cost us much: we keep the mechanism for legacy functions
anyways, so supporting bpf_wq_set_callback_impl() and co only requires
keeping definitions in the kernel.

The only benefit of *removing* these _impl functions is that we could
clean up __prog support.

But having backwards compat seems like a better deal.
What do you think?

>
>>
>>
>>> git tag --contains bc049387b41f | grep -v rc
>>> v6.16
>>> v6.17
>>> v6.18
>>>
>>> [1] https://lore.kernel.org/all/20250513142812.1021591-1-memxor@gmail.com/
>>> [2] https://lore.kernel.org/all/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org/
>>>
>>>
>
On Mon, Jan 12, 2026 at 10:54 AM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
>
> On 1/9/26 1:56 PM, Ihor Solodrai wrote:
> > On 1/9/26 1:49 PM, Alexei Starovoitov wrote:
> >> On Fri, Jan 9, 2026 at 1:39 PM Ihor Solodrai <ihor.solodrai@linux.dev> wrote:
> >>>
> >>> [...]
> >>>
> >>>> I feel bpf_task_work_schedule_resume() is ok to break, since it's so new.
> >>>> We can remove bpf_task_work_schedule_[resume|singal]_impl()
> >>>> to avoid carrying forward forever.
> >>>>
> >>>> bpf_stream_vprintk_impl() is not that clear. I would remove it too.
> >>>
> >>> That leaves only bpf_wq_set_callback_impl(). Can we break that too?
> >>
> >> Sounds like Benjamin is ok removing it.
> >> So I think we can indeed remove them all.
> >>
> >>> Then there won't be legacy cases at all. It was introduced in v6.16
> >>> along the with __prog suffix [1][2].
> >>>
> >>> If we go this route, we could clean up __prog support/docs too.
> >>>
> >>> I think it's worth it to make an "all or nothing" decision here:
> >>> either break all 4 existing kfuncs, or backwards-support all of them.
> >>
> >> I don't see why "all or nothing" is a good thing.
> >> It won't be "all" anyway.
> >> We have bpf_rbtree_add_impl(), bpf_list_push_front_impl(), etc.
> >> And those we cannot remove. sched-ext is using them.
> >> Another few categories are bpf_obj_new_impl(), bpf_obj_drop_impl().
> >> There are not __prog type, but conceptually the same thing and
> >> KF_IMPLICIT_ARGS should support them too eventually.
> >
> > I was thinking we could remove/simplify code relevant to backwards
> > compat of existing _impl kfuncs. But you're right, if we start using
> > implicit args for other types/kfuncs, the "legacy" case still has to
> > work.
> >
> > Ok, in the next revision I'll remove all the __prog users, but leave
> > the "legacy" case support in place for future use.
>
> I just had an off-list chat with Andrii, and we agreed that leaving
> the existing _impl kfuncs supported may be a good idea.
>
> It doesn't cost us much: we keep the mechanism for legacy functions
> anyways, so supporting bpf_wq_set_callback_impl() and co only requires
> keeping definitions in the kernel.
>
> The only benefit of *removing* these _impl functions is that we could
> clean up __prog support.
>
> But having backwards compat seems like a better deal.
> What do you think?
>
I think that bit of __prog clean up might be worth doing after all.
The biggest difference for users if we remove _impl stuff from new
kernels would be an extra explicit xxx_impl kfunc declaration (it
won't come from vmlinux.h anymore) and then the following call pattern
(which probably will be hidden in the macro anyways):
if (xxx_impl)
xxx_impl(..., NULL);
else
xxx(...);
This will work on old and new kernels alike, so ok, let's just drop
all the _impl stuff. Can't do that for bpf_obj_new_impl() because
that's already used more widely, but for task_work, wq, stream -- they
are all fresh and not yet properly used in production.
>
> >
> >>
> >>
> >>> git tag --contains bc049387b41f | grep -v rc
> >>> v6.16
> >>> v6.17
> >>> v6.18
> >>>
> >>> [1] https://lore.kernel.org/all/20250513142812.1021591-1-memxor@gmail.com/
> >>> [2] https://lore.kernel.org/all/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org/
> >>>
> >>>
> >
>
© 2016 - 2026 Red Hat, Inc.