.../testing/selftests/bpf/bpf_experimental.h | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-)
Recent x86 kernels export __preempt_count as a ksym, while some old kernels
between v6.1 and v6.14 expose the preemption counter via
pcpu_hot.preempt_count. The existing selftest helper unconditionally
dereferenced __preempt_count, which breaks BPF program loading on such old
kernels.
Make the x86 preemption count lookup version-agnostic by:
- Marking __preempt_count and pcpu_hot as weak ksyms.
- Introducing a BTF-described pcpu_hot___local layout with
preserve_access_index.
- Selecting the appropriate access path at runtime based on ksym
availability, using bpf_ksym_exists() and bpf_core_field_exists().
This allows a single BPF binary to run correctly across kernel versions
(e.g., v6.18 vs. v6.13) without relying on compile-time version checks.
Signed-off-by: Changwoo Min <changwoo@igalia.com>
---
.../testing/selftests/bpf/bpf_experimental.h | 21 +++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index a39576c8ba04..acabf869d2c4 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -614,7 +614,13 @@ extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
extern bool CONFIG_PREEMPT_RT __kconfig __weak;
#ifdef bpf_target_x86
-extern const int __preempt_count __ksym;
+extern const int __preempt_count __ksym __weak;
+
+struct pcpu_hot___local {
+ int preempt_count;
+} __attribute__((preserve_access_index));
+
+extern struct pcpu_hot___local pcpu_hot __ksym __weak;
#endif
struct task_struct___preempt_rt {
@@ -624,7 +630,18 @@ struct task_struct___preempt_rt {
static inline int get_preempt_count(void)
{
#if defined(bpf_target_x86)
- return *(int *) bpf_this_cpu_ptr(&__preempt_count);
+ /* By default, read the per-CPU __preempt_count. */
+ if (bpf_ksym_exists(&__preempt_count))
+ return *(int *) bpf_this_cpu_ptr(&__preempt_count);
+
+ /* If __preempt_count does not exist, try to read preempt_count under
+ * struct pcpu_hot. Between v6.1 and v6.14 -- more specifically,
+ * [64701838bf057, 46e8fff6d45fe), preempt_count had been managed
+ * under struct pcpu_hot.
+ */
+ if (bpf_core_field_exists(pcpu_hot.preempt_count))
+ return ((struct pcpu_hot___local *)
+ bpf_this_cpu_ptr(&pcpu_hot))->preempt_count;
#elif defined(bpf_target_arm64)
return bpf_get_current_task_btf()->thread_info.preempt.count;
#endif
--
2.52.0
On Thu, Jan 29, 2026 at 6:19 PM Changwoo Min <changwoo@igalia.com> wrote:
>
> Recent x86 kernels export __preempt_count as a ksym, while some old kernels
> between v6.1 and v6.14 expose the preemption counter via
> pcpu_hot.preempt_count. The existing selftest helper unconditionally
> dereferenced __preempt_count, which breaks BPF program loading on such old
> kernels.
>
> Make the x86 preemption count lookup version-agnostic by:
> - Marking __preempt_count and pcpu_hot as weak ksyms.
> - Introducing a BTF-described pcpu_hot___local layout with
> preserve_access_index.
> - Selecting the appropriate access path at runtime using ksym availability
> and bpf_ksym_exists() and bpf_core_field_exists().
>
> This allows a single BPF binary to run correctly across kernel versions
> (e.g., v6.18 vs. v6.13) without relying on compile-time version checks.
>
> Signed-off-by: Changwoo Min <changwoo@igalia.com>
> ---
> .../testing/selftests/bpf/bpf_experimental.h | 21 +++++++++++++++++--
> 1 file changed, 19 insertions(+), 2 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
> index a39576c8ba04..acabf869d2c4 100644
> --- a/tools/testing/selftests/bpf/bpf_experimental.h
> +++ b/tools/testing/selftests/bpf/bpf_experimental.h
> @@ -614,7 +614,13 @@ extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
>
> extern bool CONFIG_PREEMPT_RT __kconfig __weak;
> #ifdef bpf_target_x86
> -extern const int __preempt_count __ksym;
> +extern const int __preempt_count __ksym __weak;
> +
> +struct pcpu_hot___local {
> + int preempt_count;
> +} __attribute__((preserve_access_index));
> +
> +extern struct pcpu_hot___local pcpu_hot __ksym __weak;
> #endif
>
> struct task_struct___preempt_rt {
> @@ -624,7 +630,18 @@ struct task_struct___preempt_rt {
> static inline int get_preempt_count(void)
> {
> #if defined(bpf_target_x86)
> - return *(int *) bpf_this_cpu_ptr(&__preempt_count);
> + /* By default, read the per-CPU __preempt_count. */
> + if (bpf_ksym_exists(&__preempt_count))
> + return *(int *) bpf_this_cpu_ptr(&__preempt_count);
> +
> + /* If __preempt_count does not exist, try to read preempt_count under
Please use proper kernel comment style in the future.
We don't use old networking style in bpf anymore.
I fixed it up while applying.
Also use [PATCH bpf-next] in the subject to help CI identify the tree.
Hi Alexei,

On 1/31/26 5:23 AM, Alexei Starovoitov wrote:
> On Thu, Jan 29, 2026 at 6:19 PM Changwoo Min <changwoo@igalia.com> wrote:
>
> Please use proper kernel comment style in the future.
> We don't use old networking style in bpf anymore.
> I fixed it up while applying.
>
> Also use [PATCH bpf-next] in the subject to help CI identify the tree.

Noted. Thanks a lot!

Regards,
Changwoo Min
© 2016 - 2026 Red Hat, Inc.