[PATCH 3/3] locking: Add lock context annotations in the spinlock implementation

Bart Van Assche posted 3 patches 3 weeks, 3 days ago
[PATCH 3/3] locking: Add lock context annotations in the spinlock implementation
Posted by Bart Van Assche 3 weeks, 3 days ago
Make the spinlock implementation compatible with lock context analysis
(CONTEXT_ANALYSIS := y) by adding lock context annotations to the
_raw_##op##_...() macros.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 kernel/locking/spinlock.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 7685defd7c52..b42d293da38b 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -64,8 +64,9 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
  * time (making _this_ CPU preemptible if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
  */
-#define BUILD_LOCK_OPS(op, locktype)					\
+#define BUILD_LOCK_OPS(op, locktype, lock_ctx_op)			\
 static void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
+	lock_ctx_op(lock)						\
 {									\
 	for (;;) {							\
 		preempt_disable();					\
@@ -78,6 +79,7 @@ static void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
 }									\
 									\
 static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
+	lock_ctx_op(lock)						\
 {									\
 	unsigned long flags;						\
 									\
@@ -96,11 +98,13 @@ static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
 }									\
 									\
 static void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)	\
+	lock_ctx_op(lock)						\
 {									\
 	_raw_##op##_lock_irqsave(lock);					\
 }									\
 									\
 static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
+	lock_ctx_op(lock)						\
 {									\
 	unsigned long flags;						\
 									\
@@ -123,11 +127,11 @@ static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
  *         __[spin|read|write]_lock_irqsave()
  *         __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, raw_spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock, __acquires);
 
 #ifndef CONFIG_PREEMPT_RT
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
+BUILD_LOCK_OPS(read, rwlock, __acquires_shared);
+BUILD_LOCK_OPS(write, rwlock, __acquires);
 #endif
 
 #endif
Re: [PATCH 3/3] locking: Add lock context annotations in the spinlock implementation
Posted by Marco Elver 2 weeks, 6 days ago
On Fri, 13 Mar 2026 at 18:15, Bart Van Assche <bvanassche@acm.org> wrote:
>
> Make the spinlock implementation compatible with lock context analysis
> (CONTEXT_ANALYSIS := 1) by adding lock context annotations to the

CONTEXT_ANALYSIS := y

> _raw_##op##_...() macros.
>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
>  kernel/locking/spinlock.c | 12 ++++++++----
>  1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
> index 7685defd7c52..b42d293da38b 100644
> --- a/kernel/locking/spinlock.c
> +++ b/kernel/locking/spinlock.c
> @@ -64,8 +64,9 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
>   * time (making _this_ CPU preemptible if possible), and we also signal
>   * towards that other CPU that it should break the lock ASAP.
>   */
> -#define BUILD_LOCK_OPS(op, locktype)                                   \
> +#define BUILD_LOCK_OPS(op, locktype, lock_ctx_op)                      \
>  static void __lockfunc __raw_##op##_lock(locktype##_t *lock)           \
> +       lock_ctx_op(lock)                                               \
>  {                                                                      \
>         for (;;) {                                                      \
>                 preempt_disable();                                      \
> @@ -78,6 +79,7 @@ static void __lockfunc __raw_##op##_lock(locktype##_t *lock)          \
>  }                                                                      \
>                                                                         \
>  static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
> +       lock_ctx_op(lock)                                               \
>  {                                                                      \
>         unsigned long flags;                                            \
>                                                                         \
> @@ -96,11 +98,13 @@ static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
>  }                                                                      \
>                                                                         \
>  static void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)       \
> +       lock_ctx_op(lock)                                               \
>  {                                                                      \
>         _raw_##op##_lock_irqsave(lock);                                 \
>  }                                                                      \
>                                                                         \
>  static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
> +       lock_ctx_op(lock)                                               \
>  {                                                                      \
>         unsigned long flags;                                            \
>                                                                         \
> @@ -123,11 +127,11 @@ static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)           \
>   *         __[spin|read|write]_lock_irqsave()
>   *         __[spin|read|write]_lock_bh()
>   */
> -BUILD_LOCK_OPS(spin, raw_spinlock);
> +BUILD_LOCK_OPS(spin, raw_spinlock, __acquires);
>
>  #ifndef CONFIG_PREEMPT_RT
> -BUILD_LOCK_OPS(read, rwlock);
> -BUILD_LOCK_OPS(write, rwlock);
> +BUILD_LOCK_OPS(read, rwlock, __acquires_shared);
> +BUILD_LOCK_OPS(write, rwlock, __acquires);
>  #endif
>
>  #endif
[tip: locking/core] locking: Add lock context annotations in the spinlock implementation
Posted by tip-bot2 for Bart Van Assche 2 weeks, 6 days ago
The following commit has been merged into the locking/core branch of tip:

Commit-ID:     b06e988c4c52ce8750616ea9b23c8bd3b611b931
Gitweb:        https://git.kernel.org/tip/b06e988c4c52ce8750616ea9b23c8bd3b611b931
Author:        Bart Van Assche <bvanassche@acm.org>
AuthorDate:    Fri, 13 Mar 2026 10:15:09 -07:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 16 Mar 2026 13:16:50 +01:00

locking: Add lock context annotations in the spinlock implementation

Make the spinlock implementation compatible with lock context analysis
(CONTEXT_ANALYSIS := y) by adding lock context annotations to the
_raw_##op##_...() macros.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20260313171510.230998-4-bvanassche@acm.org
---
 kernel/locking/spinlock.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 7685def..b42d293 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -64,8 +64,9 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
  * time (making _this_ CPU preemptible if possible), and we also signal
  * towards that other CPU that it should break the lock ASAP.
  */
-#define BUILD_LOCK_OPS(op, locktype)					\
+#define BUILD_LOCK_OPS(op, locktype, lock_ctx_op)			\
 static void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
+	lock_ctx_op(lock)						\
 {									\
 	for (;;) {							\
 		preempt_disable();					\
@@ -78,6 +79,7 @@ static void __lockfunc __raw_##op##_lock(locktype##_t *lock)		\
 }									\
 									\
 static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
+	lock_ctx_op(lock)						\
 {									\
 	unsigned long flags;						\
 									\
@@ -96,11 +98,13 @@ static unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
 }									\
 									\
 static void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)	\
+	lock_ctx_op(lock)						\
 {									\
 	_raw_##op##_lock_irqsave(lock);					\
 }									\
 									\
 static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
+	lock_ctx_op(lock)						\
 {									\
 	unsigned long flags;						\
 									\
@@ -123,11 +127,11 @@ static void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
  *         __[spin|read|write]_lock_irqsave()
  *         __[spin|read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(spin, raw_spinlock);
+BUILD_LOCK_OPS(spin, raw_spinlock, __acquires);
 
 #ifndef CONFIG_PREEMPT_RT
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
+BUILD_LOCK_OPS(read, rwlock, __acquires_shared);
+BUILD_LOCK_OPS(write, rwlock, __acquires);
 #endif
 
 #endif