To comply with MISRA rule 8.10 ("An inline function shall be declared
with the static storage class"), convert what is presently
_spin_lock_cb() to an always-inline (and static) helper, while making
the function itself a thin wrapper, just like _spin_lock() is.

While there, drop the unlikely() from the callback check and correct
the indentation in _spin_lock().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -304,7 +304,8 @@ static always_inline u16 observe_head(sp
     return read_atomic(&t->head);
 }
 
-void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
+static void always_inline spin_lock_common(spinlock_t *lock,
+                                           void (*cb)(void *), void *data)
 {
     spinlock_tickets_t tickets = SPINLOCK_TICKET_INC;
     LOCK_PROFILE_VAR;
@@ -316,7 +317,7 @@ void inline _spin_lock_cb(spinlock_t *lo
     while ( tickets.tail != observe_head(&lock->tickets) )
     {
         LOCK_PROFILE_BLOCK;
-        if ( unlikely(cb) )
+        if ( cb )
             cb(data);
         arch_lock_relax();
     }
@@ -327,7 +328,12 @@ void inline _spin_lock_cb(spinlock_t *lo
 
 void _spin_lock(spinlock_t *lock)
 {
-     _spin_lock_cb(lock, NULL, NULL);
+    spin_lock_common(lock, NULL, NULL);
+}
+
+void _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
+{
+    spin_lock_common(lock, cb, data);
 }
 
 void _spin_lock_irq(spinlock_t *lock)
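
To make the rule and the refactoring pattern concrete, here is a minimal
standalone sketch (not Xen code: the always_inline stand-in definition and
the do_work*/report names are made up for illustration). The only inline
function has internal linkage, which is what MISRA C:2012 Rule 8.10 asks
for, while the externally visible entry points are plain out-of-line
wrappers that forward to it, mirroring spin_lock_common(), _spin_lock()
and _spin_lock_cb() in the patch:

/* Standalone sketch of the static-helper-plus-thin-wrappers pattern.
 * The always_inline macro below is only a stand-in so the example builds
 * outside Xen; the function names are hypothetical.
 */
#include <stdio.h>

#define always_inline inline __attribute__((__always_inline__))

/* Shared body: inline, but static, hence internal linkage (Rule 8.10). */
static always_inline void do_work_common(int arg, void (*cb)(void *),
                                         void *data)
{
    if ( cb )            /* plain pointer check, as after dropping unlikely() */
        cb(data);
    printf("working on %d\n", arg);
}

/* Externally visible entry points: ordinary, non-inline thin wrappers. */
void do_work(int arg)
{
    do_work_common(arg, NULL, NULL);
}

void do_work_cb(int arg, void (*cb)(void *), void *data)
{
    do_work_common(arg, cb, data);
}

static void report(void *data)
{
    printf("callback: %s\n", (const char *)data);
}

int main(void)
{
    do_work(1);                      /* no callback, like _spin_lock() */
    do_work_cb(2, report, "hello");  /* with callback, like _spin_lock_cb() */
    return 0;
}

unlikely() here is the usual __builtin_expect()-based branch hint. One
plausible reason for dropping it in the patch is that after the split
_spin_lock() passes a constant NULL (so the check can vanish entirely when
the helper is inlined there), while callers of _spin_lock_cb() do pass a
callback, which would make the hint misleading rather than helpful.
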
On Wed, 14 Jun 2023, Jan Beulich wrote:
> To comply with MISRA rule 8.10 ("An inline function shall be declared
> with the static storage class"), convert what is presently
> _spin_lock_cb() to an always-inline (and static) helper, while making
> the function itself a thin wrapper, just like _spin_lock() is.
>
> While there, drop the unlikely() from the callback check and correct
> the indentation in _spin_lock().
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>