It's just a wrapper around spin_unlock_irqrestore() with zero value.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/posix-timers.c | 15 +++++----------
1 file changed, 5 insertions(+), 10 deletions(-)
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -144,11 +144,6 @@ static int posix_timer_add(struct k_itim
return -EAGAIN;
}
-static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
-{
- spin_unlock_irqrestore(&timr->it_lock, flags);
-}
-
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_real_ts64(tp);
@@ -691,7 +686,7 @@ static int do_timer_gettime(timer_t time
else
kc->timer_get(timr, setting);
- unlock_timer(timr, flags);
+ spin_unlock_irqrestore(&timr->it_lock, flags);
return ret;
}
@@ -755,7 +750,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
return -EINVAL;
overrun = timer_overrun_to_int(timr);
- unlock_timer(timr, flags);
+ spin_unlock_irqrestore(&timr->it_lock, flags);
return overrun;
}
@@ -822,7 +817,7 @@ static struct k_itimer *timer_wait_runni
/* Prevent kfree(timer) after dropping the lock */
rcu_read_lock();
- unlock_timer(timer, *flags);
+ spin_unlock_irqrestore(&timer->it_lock, *flags);
/*
* kc->timer_wait_running() might drop RCU lock. So @timer
@@ -928,7 +923,7 @@ static int do_timer_settime(timer_t time
timr = timer_wait_running(timr, &flags);
goto retry;
}
- unlock_timer(timr, flags);
+ spin_unlock_irqrestore(&timr->it_lock, flags);
return error;
}
@@ -1046,7 +1041,7 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
WRITE_ONCE(timer->it_signal, NULL);
spin_unlock(&current->sighand->siglock);
- unlock_timer(timer, flags);
+ spin_unlock_irqrestore(&timer->it_lock, flags);
posix_timer_unhash_and_free(timer);
return 0;
}
On Mon, Feb 24, 2025 at 11:15:28AM +0100, Thomas Gleixner wrote:
> It's just a wrapper around spin_unlock_irqrestore() with zero value.
Well, I disagree... the value is that it matches lock_timer(). Both in
naming and in argument types.
Anyway, I started tinkering with the code, it's more hacky than I'd like
it to be, but what do you think?
---
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -74,6 +74,29 @@ static struct k_itimer *__lock_timer(tim
__timr; \
})
+static inline void unlock_timer(struct k_itimer *timer, unsigned long flags)
+{
+ spin_unlock_irqrestore(&timer->it_lock, flags);
+}
+
+__DEFINE_CLASS_IS_CONDITIONAL(lock_timer, true);
+__DEFINE_UNLOCK_GUARD(lock_timer, struct k_itimer,
+ unlock_timer(_T->lock, _T->flags),
+ unsigned long flags);
+static inline class_lock_timer_t class_lock_timer_constructor(timer_t id)
+{
+ class_lock_timer_t _t = {
+ .lock = __lock_timer(id, &_t.flags),
+ };
+ return _t;
+}
+
+#define scoped_guard_end(_name) do { \
+ class_##_name##_t *_T = &(scope); \
+ class_##_name##_destructor(_T); \
+ _T->lock = NULL; \
+} while (0)
+
static int hash(struct signal_struct *sig, unsigned int nr)
{
return hash_32(hash32_ptr(sig) ^ nr, timer_hashbits);
@@ -327,14 +350,13 @@ bool posixtimer_deliver_signal(struct ke
* Release siglock to ensure proper locking order versus
* timr::it_lock. Keep interrupts disabled.
*/
- spin_unlock(&current->sighand->siglock);
+ guard(spinlock)(&current->sighand->siglock);
ret = __posixtimer_deliver_signal(info, timr);
/* Drop the reference which was acquired when the signal was queued */
posixtimer_putref(timr);
- spin_lock(&current->sighand->siglock);
return ret;
}
@@ -717,24 +739,20 @@ void common_timer_get(struct k_itimer *t
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
- const struct k_clock *kc;
- struct k_itimer *timr;
- unsigned long flags;
- int ret = 0;
-
- timr = lock_timer(timer_id, &flags);
- if (!timr)
- return -EINVAL;
+ scoped_guard (lock_timer, timer_id) {
+ struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
+ const struct k_clock *kc;
+
+ memset(setting, 0, sizeof(*setting));
+ kc = timr->kclock;
+ if (WARN_ON_ONCE(!kc || !kc->timer_get))
+ return -EINVAL;
- memset(setting, 0, sizeof(*setting));
- kc = timr->kclock;
- if (WARN_ON_ONCE(!kc || !kc->timer_get))
- ret = -EINVAL;
- else
kc->timer_get(timr, setting);
+ return 0;
+ }
- spin_unlock_irqrestore(&timr->it_lock, flags);
- return ret;
+ return -EINVAL;
}
/* Get the time remaining on a POSIX.1b interval timer. */
@@ -788,18 +806,12 @@ SYSCALL_DEFINE2(timer_gettime32, timer_t
*/
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
- struct k_itimer *timr;
- unsigned long flags;
- int overrun;
-
- timr = lock_timer(timer_id, &flags);
- if (!timr)
- return -EINVAL;
-
- overrun = timer_overrun_to_int(timr);
- spin_unlock_irqrestore(&timr->it_lock, flags);
+ scoped_guard (lock_timer, timer_id) {
+ struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
- return overrun;
+ return timer_overrun_to_int(timr);
+ }
+ return -EINVAL;
}
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
@@ -855,15 +867,9 @@ static void common_timer_wait_running(st
* when the task which tries to delete or disarm the timer has preempted
* the task which runs the expiry in task work context.
*/
-static struct k_itimer *timer_wait_running(struct k_itimer *timer, unsigned long *flags,
- bool delete)
+static void timer_wait_running(struct k_itimer *timer)
{
const struct k_clock *kc = READ_ONCE(timer->kclock);
- timer_t timer_id = READ_ONCE(timer->it_id);
-
- /* Prevent kfree(timer) after dropping the lock */
- rcu_read_lock();
- spin_unlock_irqrestore(&timer->it_lock, *flags);
/*
* kc->timer_wait_running() might drop RCU lock. So @timer cannot
@@ -872,27 +878,6 @@ static struct k_itimer *timer_wait_runni
*/
if (!WARN_ON_ONCE(!kc->timer_wait_running))
kc->timer_wait_running(timer);
-
- rcu_read_unlock();
-
- /*
- * On deletion the timer has been marked invalid before
- * timer_delete_hook() has been invoked. That means that the
- * current task is the only one which has access to the timer and
- * even after dropping timer::it_lock above, no other thread can
- * have accessed the timer.
- */
- if (delete) {
- spin_lock_irqsave(&timer->it_lock, *flags);
- return timer;
- }
-
- /*
- * If invoked from timer_set() the timer could have been deleted
- * after dropping the lock. So in that case the timer needs to be
- * looked up and validated.
- */
- return lock_timer(timer_id, flags);
}
/*
@@ -952,8 +937,6 @@ static int do_timer_settime(timer_t time
struct itimerspec64 *old_spec64)
{
const struct k_clock *kc;
- struct k_itimer *timr;
- unsigned long flags;
int error;
if (!timespec64_valid(&new_spec64->it_interval) ||
@@ -963,33 +946,35 @@ static int do_timer_settime(timer_t time
if (old_spec64)
memset(old_spec64, 0, sizeof(*old_spec64));
- timr = lock_timer(timer_id, &flags);
retry:
- if (!timr)
- return -EINVAL;
+ scoped_guard (lock_timer, timer_id) {
+ struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
- if (old_spec64)
- old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);
+ if (old_spec64)
+ old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);
- /* Prevent signal delivery and rearming. */
- timr->it_signal_seq++;
+ /* Prevent signal delivery and rearming. */
+ timr->it_signal_seq++;
+
+ kc = timr->kclock;
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ return -EINVAL;
- kc = timr->kclock;
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
- else
error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
+ if (error == TIMER_RETRY) {
+ // We already got the old time...
+ old_spec64 = NULL;
+ /* Unlocks and relocks the timer if it still exists */
+
+ guard(rcu)();
+ scoped_guard_end(lock_timer);
+ timer_wait_running(timr);
+ goto retry;
+ }
- if (error == TIMER_RETRY) {
- // We already got the old time...
- old_spec64 = NULL;
- /* Unlocks and relocks the timer if it still exists */
- timr = timer_wait_running(timr, &flags, false);
- goto retry;
+ return error;
}
- spin_unlock_irqrestore(&timr->it_lock, flags);
-
- return error;
+ return -EINVAL;
}
/* Set a POSIX.1b interval timer */
@@ -1072,18 +1057,8 @@ static inline int timer_delete_hook(stru
return kc->timer_del(timer);
}
-static int posix_timer_delete(struct k_itimer *timer, timer_t id)
+static void posix_timer_invalidate(struct k_itimer *timer, unsigned long flags)
{
- unsigned long flags;
-
- if (!timer) {
- timer = lock_timer(id, &flags);
- if (!timer)
- return -EINVAL;
- } else {
- spin_lock_irqsave(&timer->it_lock, flags);
- }
-
/*
* Invalidate the timer, remove it from the linked list and remove
* it from the ignored list if pending.
@@ -1115,19 +1090,23 @@ static int posix_timer_delete(struct k_i
* delete possible after unlocking the timer as the timer
* has been marked invalid above.
*/
- timer_wait_running(timer, &flags, true);
+ guard(rcu)();
+ spin_unlock_irqrestore(&timer->it_lock, flags);
+ timer_wait_running(timer);
+ spin_lock_irqsave(&timer->it_lock, flags);
}
-
- spin_unlock_irqrestore(&timer->it_lock, flags);
- /* Remove it from the hash, which frees up the timer ID */
- posix_timer_unhash_and_free(timer);
- return 0;
}
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
- return posix_timer_delete(NULL, timer_id);
+ scoped_guard (lock_timer, timer_id) {
+ posix_timer_invalidate(scope.lock, scope.flags);
+ scoped_guard_end(lock_timer);
+ posix_timer_unhash_and_free(scope.lock);
+ return 0;
+ }
+ return -EINVAL;
}
/*
@@ -1143,13 +1122,17 @@ void exit_itimers(struct task_struct *ts
return;
/* Protect against concurrent read via /proc/$PID/timers */
- spin_lock_irq(&tsk->sighand->siglock);
- hlist_move_list(&tsk->signal->posix_timers, &timers);
- spin_unlock_irq(&tsk->sighand->siglock);
+ scoped_guard (spinlock_irq, &tsk->sighand->siglock)
+ hlist_move_list(&tsk->signal->posix_timers, &timers);
/* The timers are not longer accessible via tsk::signal */
while (!hlist_empty(&timers)) {
- posix_timer_delete(hlist_entry(timers.first, struct k_itimer, list), 0);
+ struct k_itimer *timer = hlist_entry(timers.first, struct k_itimer, list);
+
+ scoped_guard (spinlock_irqsave, &timer->it_lock)
+ posix_timer_invalidate(timer, scope.flags);
+
+ posix_timer_unhash_and_free(timer);
cond_resched();
}
On Mon, Feb 24 2025 at 17:21, Peter Zijlstra wrote:
> On Mon, Feb 24, 2025 at 11:15:28AM +0100, Thomas Gleixner wrote:
>> It's just a wrapper around spin_unlock_irqrestore() with zero value.
>
> Well, I disagree... the value is that it matches lock_timer(). Both in
> naming and in argument types.
Sure, but it's not used consistently as we have places where
lock_timer() is not involved.
> @@ -327,14 +350,13 @@ bool posixtimer_deliver_signal(struct ke
> * Release siglock to ensure proper locking order versus
> * timr::it_lock. Keep interrupts disabled.
> */
> - spin_unlock(&current->sighand->siglock);
> + guard(spinlock)(&current->sighand->siglock);
How is that equivalent?
This is an unlock/lock pair because __posix_timer_deliver_signal() takes
timr->it_lock and now you introduced the ABBA which the sighand::siglock
unlock carefully avoids :)
>
> ret = __posixtimer_deliver_signal(info, timr);
>
> /* Drop the reference which was acquired when the signal was queued */
> posixtimer_putref(timr);
>
> - spin_lock(&current->sighand->siglock);
> return ret;
> }
>
> @@ -717,24 +739,20 @@ void common_timer_get(struct k_itimer *t
>
> static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
> {
> - const struct k_clock *kc;
> - struct k_itimer *timr;
> - unsigned long flags;
> - int ret = 0;
> -
> - timr = lock_timer(timer_id, &flags);
> - if (!timr)
> - return -EINVAL;
> + scoped_guard (lock_timer, timer_id) {
> + struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
> + const struct k_clock *kc;
> +
> + memset(setting, 0, sizeof(*setting));
> + kc = timr->kclock;
> + if (WARN_ON_ONCE(!kc || !kc->timer_get))
> + return -EINVAL;
>
> - memset(setting, 0, sizeof(*setting));
> - kc = timr->kclock;
> - if (WARN_ON_ONCE(!kc || !kc->timer_get))
> - ret = -EINVAL;
> - else
> kc->timer_get(timr, setting);
> + return 0;
> + }
>
> - spin_unlock_irqrestore(&timr->it_lock, flags);
> - return ret;
> + return -EINVAL;
So the resulting code is:
scoped_guard (lock_timer, timer_id) {
struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
const struct k_clock *kc;
memset(setting, 0, sizeof(*setting));
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_get))
return -EINVAL;
return 0;
}
return -EINVAL;
I had to go and stare at the guard/class muck 10 times to convince
myself that this actually works. This really wants to express the
condition of the scoped_guard() somehow, e.g. scoped_cond_guard() or
such.
> /* Delete a POSIX.1b interval timer. */
> SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
> {
> - return posix_timer_delete(NULL, timer_id);
> + scoped_guard (lock_timer, timer_id) {
> + posix_timer_invalidate(scope.lock, scope.flags);
> + scoped_guard_end(lock_timer);
> + posix_timer_unhash_and_free(scope.lock);
Not sure whether it's a good idea to free the scope.lock and not
scope.timer :)
Thanks,
tglx
On Mon, Feb 24, 2025 at 07:43:26PM +0100, Thomas Gleixner wrote:
> On Mon, Feb 24 2025 at 17:21, Peter Zijlstra wrote:
> > On Mon, Feb 24, 2025 at 11:15:28AM +0100, Thomas Gleixner wrote:
> >> It's just a wrapper around spin_unlock_irqrestore() with zero value.
> >
> > Well, I disagree... the value is that it matches lock_timer(). Both in
> > naming and in argument types.
>
> Sure, but it's not used consistently as we have places where
> lock_timer() is not involved.
>
> > @@ -327,14 +350,13 @@ bool posixtimer_deliver_signal(struct ke
> > * Release siglock to ensure proper locking order versus
> > * timr::it_lock. Keep interrupts disabled.
> > */
> > - spin_unlock(&current->sighand->siglock);
> > + guard(spinlock)(&current->sighand->siglock);
>
> How is that equivalent?
I R idiot :-)
> So the resulting code is:
>
> scoped_guard (lock_timer, timer_id) {
> struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
> const struct k_clock *kc;
>
> memset(setting, 0, sizeof(*setting));
> kc = timr->kclock;
> if (WARN_ON_ONCE(!kc || !kc->timer_get))
> return -EINVAL;
>
> return 0;
> }
> return -EINVAL;
>
> I had to go and stare at the guard/class muck 10 times to convince
> myself that this actually works. This really wants to express the
> condition of the scoped_guard() somehow, e.g. scoped_cond_guard() or
> such.
Right, so the alternative form is something like:
scoped_cond_guard (lock_timer, return -EINVAL, timer_id) {
struct k_itimer *timr = __guard_ptr(lock_timer)(&scope);
const struct k_clock *kc;
memset(setting, 0, sizeof(*setting));
kc = timr->kclock;
if (WARN_ON_ONCE(!kc || !kc->timer_get))
return -EINVAL;
}
return 0;
Is that really so much better?
> > /* Delete a POSIX.1b interval timer. */
> > SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
> > {
> > - return posix_timer_delete(NULL, timer_id);
> > + scoped_guard (lock_timer, timer_id) {
> > + posix_timer_invalidate(scope.lock, scope.flags);
> > + scoped_guard_end(lock_timer);
> > + posix_timer_unhash_and_free(scope.lock);
>
> Not sure whether it's a good idea to free the scope.lock and not
> scope.timer :)
There is no scope.timer, the way this works is that the main pointer is
.lock, per the __DEFINE_UNLOCK_GUARD() helper.
I said there were rough edges :-/
Anyway, should I continue poking at this to see if I can clean it up /
extract more useful helpers.
Or shall I just let it be.
© 2016 - 2026 Red Hat, Inc.