Introduce bpf_in_nmi(), bpf_in_hardirq(), bpf_in_serving_softirq(), and
bpf_in_task() inline helpers in bpf_experimental.h. These allow BPF
programs to query the current execution context at a finer granularity
than the existing bpf_in_interrupt() helper.

While BPF programs can often infer their context from their attachment
points, subsystems like sched_ext may invoke the same BPF logic from
multiple contexts (e.g., task-to-task wake-ups vs. interrupt-to-task
wake-ups). These helpers give such logic a reliable way to branch on
the CPU's current execution state.
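
For example, shared logic can branch on (or account for) the context it
was invoked from. A minimal sketch, where the stats[] global and the
CTX_* indices are hypothetical:

	if (bpf_in_nmi())
		__sync_fetch_and_add(&stats[CTX_NMI], 1);
	else if (bpf_in_hardirq())
		__sync_fetch_and_add(&stats[CTX_HARDIRQ], 1);
	else if (bpf_in_serving_softirq())
		__sync_fetch_and_add(&stats[CTX_SOFTIRQ], 1);
	else if (bpf_in_task())
		__sync_fetch_and_add(&stats[CTX_TASK], 1);
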
Implementing these as BPF-native inline helpers that wrap
get_preempt_count() lets the compiler and the JIT inline the logic. The
implementation accounts for the differences in preempt_count layout
between standard and PREEMPT_RT kernels.
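
For reference, the preempt_count bit layout these masks assume matches
the comment in include/linux/preempt.h:

	PREEMPT_MASK: 0x000000ff
	SOFTIRQ_MASK: 0x0000ff00
	HARDIRQ_MASK: 0x000f0000
	NMI_MASK:     0x00f00000

On PREEMPT_RT, the serving-softirq state is tracked in
current->softirq_disable_cnt instead of preempt_count, which is why the
softirq and task helpers read it from the task_struct there.
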
Reviewed-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Changwoo Min <changwoo@igalia.com>
---
.../testing/selftests/bpf/bpf_experimental.h | 58 +++++++++++++++++++
1 file changed, 58 insertions(+)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 68a49b1f77ae..a39576c8ba04 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -610,6 +610,8 @@ extern int bpf_cgroup_read_xattr(struct cgroup *cgroup, const char *name__str,
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+
extern bool CONFIG_PREEMPT_RT __kconfig __weak;
#ifdef bpf_target_x86
extern const int __preempt_count __ksym;
@@ -648,4 +650,60 @@ static inline int bpf_in_interrupt(void)
(tsk->softirq_disable_cnt & SOFTIRQ_MASK);
}
 
+/* Description
+ * Report whether it is in NMI context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_nmi(void)
+{
+ return get_preempt_count() & NMI_MASK;
+}
+
+/* Description
+ * Report whether it is in hard IRQ context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_hardirq(void)
+{
+ return get_preempt_count() & HARDIRQ_MASK;
+}
+
+/* Description
+ * Report whether it is in softirq context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_serving_softirq(void)
+{
+ struct task_struct___preempt_rt *tsk;
+ int pcnt;
+
+ pcnt = get_preempt_count();
+ if (!CONFIG_PREEMPT_RT)
+ return (pcnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;
+
+ tsk = (void *) bpf_get_current_task_btf();
+ return (tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET;
+}
+
+/* Description
+ * Report whether it is in task context. Only works on the following archs:
+ * * x86
+ * * arm64
+ */
+static inline int bpf_in_task(void)
+{
+ struct task_struct___preempt_rt *tsk;
+ int pcnt;
+
+ pcnt = get_preempt_count();
+ if (!CONFIG_PREEMPT_RT)
+ return !(pcnt & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ tsk = (void *) bpf_get_current_task_btf();
+ return !((pcnt & (NMI_MASK | HARDIRQ_MASK)) |
+ ((tsk->softirq_disable_cnt & SOFTIRQ_MASK) & SOFTIRQ_OFFSET));
+}
#endif
--
2.52.0