Sleepable timers are implemented on top of a workqueue, which means that
there are no guarantees of timing or ordering.
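A minimal BPF-side sketch of the intended usage (hypothetical,
selftest-style code that is not part of this patch; it assumes the
callback is still registered through the existing
bpf_timer_set_callback() helper):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	struct elem {
		struct bpf_timer t;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct elem);
	} timer_map SEC(".maps");

	/* runs from a workqueue, i.e. in a sleepable context */
	static int timer_cb_sleepable(void *map, int *key, struct bpf_timer *timer)
	{
		return 0;
	}

	SEC("tc")
	int start_sleepable_timer(struct __sk_buff *skb)
	{
		int key = 0;
		struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

		if (!e)
			return 0;
		bpf_timer_init(&e->t, &timer_map,
			       CLOCK_MONOTONIC | BPF_F_TIMER_SLEEPABLE);
		bpf_timer_set_callback(&e->t, timer_cb_sleepable);
		/* nsecs must be 0: any delay makes bpf_timer_start()
		 * fail with -EINVAL for a sleepable timer
		 */
		bpf_timer_start(&e->t, 0, 0);
		return 0;
	}

	char _license[] SEC("license") = "GPL";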
Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
---
changes in v6:
- dropped extra spinlock
- implement cancel_and_free of a sleepable timer through
a dedicated workqueue
no changes in v5
changes in v4:
- dropped __bpf_timer_compute_key()
- use a spin_lock instead of a semaphore
- ensure bpf_timer_cancel_and_free() does not complain about a
non-sleepable context and use cancel_work() instead of
cancel_work_sync()
- return -EINVAL if a delay is given to bpf_timer_start() with
BPF_F_TIMER_SLEEPABLE
changes in v3:
- extracted the implementation in bpf_timer only, without
bpf_timer_set_sleepable_cb()
- rely on schedule_work() only, from bpf_timer_start()
- add semaphore to ensure bpf_timer_work_cb() is accessing
consistent data
changes in v2 (compared to the one attached to v1 0/9):
- make use of a kfunc
- add a (non-used) BPF_F_TIMER_SLEEPABLE
- the callback is *not* called, as it makes the kernel crash
---
include/uapi/linux/bpf.h | 13 ++++
kernel/bpf/helpers.c | 159 ++++++++++++++++++++++++++++++++++++++---------
2 files changed, 143 insertions(+), 29 deletions(-)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 9585f5345353..f1890eed213a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7469,6 +7469,19 @@ enum {
BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
+/* Extra flags to control bpf_timer_init() behaviour, in addition to CLOCK_*.
+ * - BPF_F_TIMER_SLEEPABLE: Timer will run in a workqueue in a sleepable
+ * context, with no guarantees of ordering or timing (consider this as
+ * being just offloaded immediately).
+ */
+enum {
+ /* CLOCK_* are using bits 0 to 3 */
+ BPF_F_TIMER_SLEEPABLE = (1ULL << 4),
+ __MAX_BPF_F_TIMER_INIT,
+};
+
+#define MAX_BPF_F_TIMER_INIT __MAX_BPF_F_TIMER_INIT
+
/* BPF numbers iterator state */
struct bpf_iter_num {
/* opaque iterator state; having __u64 here allows to preserve correct
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 9234174ccb21..fd05d4358b31 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1096,12 +1096,19 @@ const struct bpf_func_proto bpf_snprintf_proto = {
* freeing the timers when inner map is replaced or deleted by user space.
*/
struct bpf_hrtimer {
- struct hrtimer timer;
+ union {
+ struct hrtimer timer;
+ struct work_struct work;
+ };
struct bpf_map *map;
struct bpf_prog *prog;
void __rcu *callback_fn;
void *value;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct work_struct sync_work;
+ };
+ u64 flags;
};
/* the actual struct hidden inside uapi struct bpf_timer */
@@ -1114,6 +1121,57 @@ struct bpf_timer_kern {
struct bpf_spin_lock lock;
} __attribute__((aligned(8)));
+static void bpf_timer_work_cb(struct work_struct *work)
+{
+ struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, work);
+ struct bpf_tramp_run_ctx __maybe_unused run_ctx;
+ struct bpf_prog *prog = t->prog;
+ struct bpf_map *map = t->map;
+ bpf_callback_t callback_fn;
+ void *value = t->value;
+ void *key;
+ u32 idx;
+
+ BTF_TYPE_EMIT(struct bpf_timer);
+
+ callback_fn = READ_ONCE(t->callback_fn);
+ if (!callback_fn || !prog)
+ return;
+
+ if (map->map_type == BPF_MAP_TYPE_ARRAY) {
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+
+ /* compute the key */
+ idx = ((char *)value - array->value) / array->elem_size;
+ key = &idx;
+ } else { /* hash or lru */
+ key = value - round_up(map->key_size, 8);
+ }
+
+ run_ctx.bpf_cookie = 0;
+
+ if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
+ /* recursion detected */
+ __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
+ return;
+ }
+
+ callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
+ /* The verifier checked that return value is zero. */
+
+ __bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
+ &run_ctx);
+}
+
+static void bpf_timer_sync_work_cb(struct work_struct *work)
+{
+ struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, sync_work);
+
+ cancel_work_sync(&t->work);
+
+ kfree_rcu(t, rcu);
+}
+
static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
@@ -1155,10 +1214,14 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
return HRTIMER_NORESTART;
}
+#define BPF_TIMER_CLOCK_MASK (MAX_CLOCKS - 1) /* 0xf */
+#define BPF_TIMER_FLAGS_MASK GENMASK_ULL(63, 4)
+
BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
u64, flags)
{
- clockid_t clockid = flags & (MAX_CLOCKS - 1);
+ clockid_t clockid = flags & BPF_TIMER_CLOCK_MASK;
+ u64 bpf_timer_flags = flags & BPF_TIMER_FLAGS_MASK;
struct bpf_hrtimer *t;
int ret = 0;
@@ -1169,7 +1232,7 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
if (in_nmi())
return -EOPNOTSUPP;
- if (flags >= MAX_CLOCKS ||
+ if (bpf_timer_flags & ~(BPF_F_TIMER_SLEEPABLE) ||
/* similar to timerfd except _ALARM variants are not supported */
(clockid != CLOCK_MONOTONIC &&
clockid != CLOCK_REALTIME &&
@@ -1190,8 +1253,14 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
t->value = (void *)timer - map->record->timer_off;
t->map = map;
t->prog = NULL;
+ t->flags = flags;
rcu_assign_pointer(t->callback_fn, NULL);
- hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
- t->timer.function = bpf_timer_cb;
+ if (flags & BPF_F_TIMER_SLEEPABLE) {
+ INIT_WORK(&t->work, bpf_timer_work_cb);
+ INIT_WORK(&t->sync_work, bpf_timer_sync_work_cb);
+ } else {
+ hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
+ t->timer.function = bpf_timer_cb;
+ }
WRITE_ONCE(timer->timer, t);
/* Guarantee the order between timer->timer and map->usercnt. So
@@ -1292,6 +1361,11 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
goto out;
}
+ if ((t->flags & BPF_F_TIMER_SLEEPABLE) && nsecs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (flags & BPF_F_TIMER_ABS)
mode = HRTIMER_MODE_ABS_SOFT;
else
@@ -1300,7 +1374,10 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla
if (flags & BPF_F_TIMER_CPU_PIN)
mode |= HRTIMER_MODE_PINNED;
- hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
+ if (t->flags & BPF_F_TIMER_SLEEPABLE)
+ schedule_work(&t->work);
+ else
+ hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
return ret;
@@ -1329,6 +1406,7 @@ static void drop_prog_refcnt(struct bpf_hrtimer *t)
BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
{
struct bpf_hrtimer *t;
+ u64 flags = 0;
int ret = 0;
if (in_nmi())
@@ -1340,6 +1418,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
ret = -EINVAL;
goto out;
}
+ flags = t->flags;
if (this_cpu_read(hrtimer_running) == t) {
/* If bpf callback_fn is trying to bpf_timer_cancel()
* its own timer the hrtimer_cancel() will deadlock
@@ -1351,10 +1430,20 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer)
drop_prog_refcnt(t);
out:
__bpf_spin_unlock_irqrestore(&timer->lock);
- /* Cancel the timer and wait for associated callback to finish
- * if it was running.
- */
- ret = ret ?: hrtimer_cancel(&t->timer);
+
+ if (flags & BPF_F_TIMER_SLEEPABLE) {
+ /* Cancel the sleepable work, but *do not* wait for
+ * it to finish if it was running, as we might not be in a
+ * sleepable context.
+ */
+ ret = ret ?: cancel_work(&t->work);
+ } else {
+ /* Cancel the timer and wait for associated callback to finish
+ * if it was running.
+ */
+ ret = ret ?: hrtimer_cancel(&t->timer);
+ }
+
rcu_read_unlock();
return ret;
}
@@ -1373,6 +1462,7 @@ void bpf_timer_cancel_and_free(void *val)
{
struct bpf_timer_kern *timer = val;
struct bpf_hrtimer *t;
+ u64 flags;
/* Performance optimization: read timer->timer without lock first. */
if (!READ_ONCE(timer->timer))
@@ -1383,6 +1473,7 @@ void bpf_timer_cancel_and_free(void *val)
t = timer->timer;
if (!t)
goto out;
+ flags = t->flags;
drop_prog_refcnt(t);
/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
* this timer, since it won't be initialized.
@@ -1392,25 +1483,35 @@ void bpf_timer_cancel_and_free(void *val)
__bpf_spin_unlock_irqrestore(&timer->lock);
if (!t)
return;
- /* Cancel the timer and wait for callback to complete if it was running.
- * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
- * right after for both preallocated and non-preallocated maps.
- * The timer->timer = NULL was already done and no code path can
- * see address 't' anymore.
- *
- * Check that bpf_map_delete/update_elem() wasn't called from timer
- * callback_fn. In such case don't call hrtimer_cancel() (since it will
- * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
- * return -1). Though callback_fn is still running on this cpu it's
- * safe to do kfree(t) because bpf_timer_cb() read everything it needed
- * from 't'. The bpf subprog callback_fn won't be able to access 't',
- * since timer->timer = NULL was already done. The timer will be
- * effectively cancelled because bpf_timer_cb() will return
- * HRTIMER_NORESTART.
- */
- if (this_cpu_read(hrtimer_running) != t)
- hrtimer_cancel(&t->timer);
- kfree_rcu(t, rcu);
+
+ if (flags & BPF_F_TIMER_SLEEPABLE) {
+ /* Trigger cancel of the sleepable work, but *do not* wait for
+ * it to finish if it was running, as we might not be in a
+ * sleepable context.
+ * kfree will be called once the work has finished.
+ */
+ schedule_work(&t->sync_work);
+ } else {
+ /* Cancel the timer and wait for callback to complete if it was running.
+ * If hrtimer_cancel() can be safely called it's safe to call kfree(t)
+ * right after for both preallocated and non-preallocated maps.
+ * The timer->timer = NULL was already done and no code path can
+ * see address 't' anymore.
+ *
+ * Check that bpf_map_delete/update_elem() wasn't called from timer
+ * callback_fn. In such case don't call hrtimer_cancel() (since it will
+ * deadlock) and don't call hrtimer_try_to_cancel() (since it will just
+ * return -1). Though callback_fn is still running on this cpu it's
+ * safe to do kfree(t) because bpf_timer_cb() read everything it needed
+ * from 't'. The bpf subprog callback_fn won't be able to access 't',
+ * since timer->timer = NULL was already done. The timer will be
+ * effectively cancelled because bpf_timer_cb() will return
+ * HRTIMER_NORESTART.
+ */
+ if (this_cpu_read(hrtimer_running) != t)
+ hrtimer_cancel(&t->timer);
+ kfree_rcu(t, rcu);
+ }
}
BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
--
2.44.0
On Mon, 2024-04-08 at 10:09 +0200, Benjamin Tissoires wrote:
[...]
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 9234174ccb21..fd05d4358b31 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -1096,12 +1096,19 @@ const struct bpf_func_proto bpf_snprintf_proto = {
> * freeing the timers when inner map is replaced or deleted by user space.
> */
> struct bpf_hrtimer {
> - struct hrtimer timer;
> + union {
> + struct hrtimer timer;
> + struct work_struct work;
> + };
> struct bpf_map *map;
> struct bpf_prog *prog;
> void __rcu *callback_fn;
> void *value;
> - struct rcu_head rcu;
> + union {
> + struct rcu_head rcu;
> + struct work_struct sync_work;
Nit:
I find this name very confusing; the field is used to cancel timer
execution. Is it a convention to call such things '...sync...'?
> + };
> + u64 flags;
> };
>
[...]
> +static void bpf_timer_sync_work_cb(struct work_struct *work)
> +{
> + struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, sync_work);
> +
> + cancel_work_sync(&t->work);
> +
> + kfree_rcu(t, rcu);
Sorry, I might be wrong, but this looks suspicious.
The 'rcu' field of 'bpf_hrtimer' is defined as follows:
struct bpf_hrtimer {
...
union {
struct rcu_head rcu;
struct work_struct sync_work;
};
...
};
And for sleepable timers the 'sync_work' field is set as follows:
BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
u64, flags)
{
...
INIT_WORK(&t->sync_work, bpf_timer_sync_work_cb);
...
}
So, it looks like 'kfree_rcu' would be called for a non-rcu pointer.
> +}
> +
On Mon, Apr 8, 2024 at 7:08 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Mon, 2024-04-08 at 10:09 +0200, Benjamin Tissoires wrote:
>
> [...]
>
> > +static void bpf_timer_sync_work_cb(struct work_struct *work)
> > +{
> > + struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, sync_work);
> > +
> > + cancel_work_sync(&t->work);
> > +
> > + kfree_rcu(t, rcu);
>
> Sorry, I might be wrong, but this looks suspicious.
> The 'rcu' field of 'bpf_hrtimer' is defined as follows:
>
> struct bpf_hrtimer {
> ...
> union {
> struct rcu_head rcu;
> struct work_struct sync_work;
> };
> ...
> };
>
> And for sleepable timers the 'sync_work' field is set as follows:
>
> BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
> u64, flags)
> {
> ...
> INIT_WORK(&t->sync_work, bpf_timer_sync_work_cb);
> ...
> }
>
> So, it looks like 'kfree_rcu' would be called for a non-rcu pointer.
That was my initial assumption too, but Alexei told me it was fine.
And I think he is correct because kfree_rcu doesn't need the rcu_head
to be initialized.
So in the end, we initialize the memory as a work_struct, and when
that work kicks in, we reuse that exact same memory as the rcu_head.
This is fine because that work will never be reused.
If I understand correctly, this is to save a few bytes, as this is a
critical struct used in programs at a high rate, and every byte counts.
Cheers,
Benjamin
On Mon, Apr 8, 2024 at 10:20 AM Benjamin Tissoires
<benjamin.tissoires@redhat.com> wrote:
>
> On Mon, Apr 8, 2024 at 7:08 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
> >
> > [...]
> >
> > So, it looks like 'kfree_rcu' would be called for a non-rcu pointer.
>
> That was my initial assumption too, but Alexei told me it was fine.
> And I think he is correct because kfree_rcu doesn't need the rcu_head
> to be initialized.
>
> So in the end, we initialize the memory as a work_struct, and when
> that work kicks in, we reuse that exact same memory as the rcu_head.
> This is fine because that work will never be reused.
>
> If I understand correctly, this is to save a few bytes, as this is a
> critical struct used in programs at a high rate, and every byte counts.
Yes. All correct.
Probably makes sense to add a comment before the kfree_rcu()
line in bpf_timer_sync_work_cb() noting that
kfree_rcu() will wait for bpf_timer_cancel() to finish,
as was done in
commit 0281b919e175 ("bpf: Fix racing between
bpf_timer_cancel_and_free and bpf_timer_cancel").
I suspect that's what confused Eduard.
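Something like this, perhaps (only sketching the suggested comment on
top of the function from this patch):

	static void bpf_timer_sync_work_cb(struct work_struct *work)
	{
		struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, sync_work);

		cancel_work_sync(&t->work);

		/* kfree_rcu() also waits for any in-flight bpf_timer_cancel()
		 * that is still under rcu_read_lock() and may dereference 't',
		 * see commit 0281b919e175 ("bpf: Fix racing between
		 * bpf_timer_cancel_and_free and bpf_timer_cancel")
		 */
		kfree_rcu(t, rcu);
	}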
Patch 1 looks great overall.
If we're going to keep this combined bpf_timer_* API for both wq
and timer, we'd need to add a flags compatibility check
to bpf_timer_start() too.
We can disallow this flag in the 'flags' argument and use the one
from t->flags, which kinda makes sense to make bpf_timer_start()
less verbose.
Alternatively, we can allow bpf_timer_start() to have it,
but then we'd need to check that it is actually passed.
Either way the patch needs an extra check in bpf_timer_start().
Just ignoring BPF_F_TIMER_SLEEPABLE in bpf_timer_start() doesn't look right.
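For the first option, the extra check could be as small as this (an
untested sketch on top of this patch, rejecting the flag in
bpf_timer_start()'s own argument so that t->flags stays the single
source of truth; the '...' elides the unchanged body):

	BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs,
		   u64, flags)
	{
		...
		/* sleepable-ness is fixed at bpf_timer_init() time; refuse it
		 * as a per-start flag instead of silently ignoring it
		 */
		if (flags & BPF_F_TIMER_SLEEPABLE) {
			ret = -EINVAL;
			goto out;
		}
		...
	}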
On Mon, 2024-04-08 at 19:20 +0200, Benjamin Tissoires wrote:

[...]

> That was my initial assumption too, but Alexei told me it was fine.
> And I think he is correct because kfree_rcu doesn't need the rcu_head
> to be initialized.
>
> So in the end, we initialize the memory as a work_struct, and when
> that work kicks in, we reuse that exact same memory as the rcu_head.
> This is fine because that work will never be reused.

Oh, I get it, thank you for the explanation.

Thanks,
Eduard