Add support for Clang's context analysis for mutex.
Signed-off-by: Marco Elver <elver@google.com>
---
v5:
* Rename "context guard" -> "context lock".
v4:
* Rename capability -> context analysis.
v3:
* Switch to DECLARE_LOCK_GUARD_1_ATTRS() (suggested by Peter)
* __assert -> __assume rename
---
Documentation/dev-tools/context-analysis.rst | 2 +-
include/linux/mutex.h | 38 +++++++-----
include/linux/mutex_types.h | 4 +-
lib/test_context-analysis.c | 64 ++++++++++++++++++++
4 files changed, 90 insertions(+), 18 deletions(-)
diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
index 746a2d275fb2..1864b6cba4d1 100644
--- a/Documentation/dev-tools/context-analysis.rst
+++ b/Documentation/dev-tools/context-analysis.rst
@@ -79,7 +79,7 @@ Supported Kernel Primitives
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the following synchronization primitives are supported:
-`raw_spinlock_t`, `spinlock_t`, `rwlock_t`.
+`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`.
For context locks with an initialization function (e.g., `spin_lock_init()`),
calling this function before initializing any guarded members or globals
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bf535f0118bb..89977c215cbd 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -62,6 +62,7 @@ do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
+ __assume_ctx_lock(mutex); \
} while (0)
/**
@@ -182,13 +183,13 @@ static inline int __must_check __devm_mutex_init(struct device *dev, struct mute
* Also see Documentation/locking/mutex-design.rst.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
- unsigned int subclass);
+ unsigned int subclass) __cond_acquires(0, lock);
extern int __must_check _mutex_lock_killable(struct mutex *lock,
- unsigned int subclass, struct lockdep_map *nest_lock);
-extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
+ unsigned int subclass, struct lockdep_map *nest_lock) __cond_acquires(0, lock);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) __acquires(lock);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
@@ -211,10 +212,10 @@ do { \
_mutex_lock_killable(lock, subclass, NULL)
#else
-extern void mutex_lock(struct mutex *lock);
-extern int __must_check mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check mutex_lock_killable(struct mutex *lock);
-extern void mutex_lock_io(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock) __acquires(lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock) __cond_acquires(0, lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock) __cond_acquires(0, lock);
+extern void mutex_lock_io(struct mutex *lock) __acquires(lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
@@ -232,7 +233,7 @@ extern void mutex_lock_io(struct mutex *lock);
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) \
( \
@@ -242,17 +243,24 @@ extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest
#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
#else
-extern int mutex_trylock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock) __cond_acquires(true, lock);
#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
#endif
-extern void mutex_unlock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock) __releases(lock);
-extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) __cond_acquires(true, lock);
-DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
-DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
+DEFINE_LOCK_GUARD_1(mutex, struct mutex, mutex_lock(_T->lock), mutex_unlock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _try, mutex_trylock(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(mutex, _intr, mutex_lock_interruptible(_T->lock), _RET == 0)
+
+DECLARE_LOCK_GUARD_1_ATTRS(mutex, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_try, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_try_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_try, _T)
+DECLARE_LOCK_GUARD_1_ATTRS(mutex_intr, __acquires(_T), __releases(*(struct mutex **)_T))
+#define class_mutex_intr_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(mutex_intr, _T)
extern unsigned long mutex_get_owner(struct mutex *lock);
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
index fdf7f515fde8..80975935ec48 100644
--- a/include/linux/mutex_types.h
+++ b/include/linux/mutex_types.h
@@ -38,7 +38,7 @@
* - detects multi-task circular deadlocks and prints out all affected
* locks and tasks (and only those tasks)
*/
-struct mutex {
+context_lock_struct(mutex) {
atomic_long_t owner;
raw_spinlock_t wait_lock;
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -59,7 +59,7 @@ struct mutex {
*/
#include <linux/rtmutex.h>
-struct mutex {
+context_lock_struct(mutex) {
struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 273fa9d34657..2b28d20c5f51 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -5,6 +5,7 @@
*/
#include <linux/build_bug.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
/*
@@ -144,3 +145,66 @@ TEST_SPINLOCK_COMMON(read_lock,
read_unlock,
read_trylock,
TEST_OP_RO);
+
+struct test_mutex_data {
+ struct mutex mtx;
+ int counter __guarded_by(&mtx);
+};
+
+static void __used test_mutex_init(struct test_mutex_data *d)
+{
+ mutex_init(&d->mtx);
+ d->counter = 0;
+}
+
+static void __used test_mutex_lock(struct test_mutex_data *d)
+{
+ mutex_lock(&d->mtx);
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ mutex_lock_io(&d->mtx);
+ d->counter++;
+ mutex_unlock(&d->mtx);
+}
+
+static void __used test_mutex_trylock(struct test_mutex_data *d, atomic_t *a)
+{
+ if (!mutex_lock_interruptible(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (!mutex_lock_killable(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (mutex_trylock(&d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+ if (atomic_dec_and_mutex_lock(a, &d->mtx)) {
+ d->counter++;
+ mutex_unlock(&d->mtx);
+ }
+}
+
+static void __used test_mutex_assert(struct test_mutex_data *d)
+{
+ lockdep_assert_held(&d->mtx);
+ d->counter++;
+}
+
+static void __used test_mutex_guard(struct test_mutex_data *d)
+{
+ guard(mutex)(&d->mtx);
+ d->counter++;
+}
+
+static void __used test_mutex_cond_guard(struct test_mutex_data *d)
+{
+ scoped_cond_guard(mutex_try, return, &d->mtx) {
+ d->counter++;
+ }
+ scoped_cond_guard(mutex_intr, return, &d->mtx) {
+ d->counter++;
+ }
+}
--
2.52.0.322.g1dd061c0dc-goog
On 12/19/25 8:39 AM, Marco Elver wrote:
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index bf535f0118bb..89977c215cbd 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -62,6 +62,7 @@ do { \
> static struct lock_class_key __key; \
> \
> __mutex_init((mutex), #mutex, &__key); \
> + __assume_ctx_lock(mutex); \
> } while (0)
The above type of change probably will have to be reverted. If I enable
context analysis for the entire kernel tree, drivers/base/devcoredump.c
doesn't build. The following error is reported:
drivers/base/devcoredump.c:406:2: error: acquiring mutex '_res->mutex'
that is already held [-Werror,-Wthread-safety-analysis]
406 | mutex_lock(&devcd->mutex);
| ^
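The pattern boils down to the following (a minimal sketch with
hypothetical names, not the actual devcoredump code):

  #include <linux/mutex.h>

  struct foo {
          struct mutex lock;
          int data __guarded_by(&lock);
  };

  static void foo_create(struct foo *f)
  {
          mutex_init(&f->lock);   /* analysis assumes &f->lock is held from here */
          f->data = 0;            /* initializing guarded members is fine */
          mutex_lock(&f->lock);   /* error: acquiring mutex that is already held */
          f->data = 1;
          mutex_unlock(&f->lock);
  }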
dev_coredumpm_timeout() calls mutex_init() and mutex_lock() from the
same function. The above type of change breaks compilation of all code
that initializes and locks a synchronization object from the same
function. My understanding of dev_coredumpm_timeout() is that there is a
good reason for calling both mutex_init() and mutex_lock() from that
function. Possible solutions are disabling context analysis for that
function or removing __assume_ctx_lock() again from mutex_init(). Does
anyone want to share their opinion about this?
Thanks,
Bart.
On Thu, Jan 08, 2026 at 02:10PM -0800, 'Bart Van Assche' via kasan-dev wrote:
> On 12/19/25 8:39 AM, Marco Elver wrote:
> > diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> > index bf535f0118bb..89977c215cbd 100644
> > --- a/include/linux/mutex.h
> > +++ b/include/linux/mutex.h
> > @@ -62,6 +62,7 @@ do { \
> > static struct lock_class_key __key; \
> > \
> > __mutex_init((mutex), #mutex, &__key); \
> > + __assume_ctx_lock(mutex); \
> > } while (0)
>
> The above type of change probably will have to be reverted. If I enable
> context analysis for the entire kernel tree, drivers/base/devcoredump.c
> doesn't build. The following error is reported:
>
> drivers/base/devcoredump.c:406:2: error: acquiring mutex '_res->mutex' that
> is already held [-Werror,-Wthread-safety-analysis]
> 406 | mutex_lock(&devcd->mutex);
> | ^
>
> dev_coredumpm_timeout() calls mutex_init() and mutex_lock() from the same
> function. The above type of change breaks compilation of all code
> that initializes and locks a synchronization object from the same
> function. My understanding of dev_coredumpm_timeout() is that there is a
> good reason for calling both mutex_init() and mutex_lock() from that
> function. Possible solutions are disabling context analysis for that
> function or removing __assume_ctx_lock() again from mutex_init(). Does
> anyone want to share their opinion about this?
Probably the most idiomatic option is to just factor out construction.
Clearly separating complex object construction from use also helps
readability regardless, esp. where concurrency is involved. We could
document such advice somewhere.
For the above case, this seems cleanest and also clearer to me:
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 55bdc7f5e59d..56ac8aa41608 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -339,6 +339,40 @@ void dev_coredump_put(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_coredump_put);
+static struct devcd_entry *
+dev_coredumpm_init(struct device *dev, struct module *owner, void *data,
+ size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen),
+ void (*free)(void *data))
+{
+ static atomic_t devcd_count = ATOMIC_INIT(0);
+ struct devcd_entry *devcd;
+
+ devcd = kzalloc(sizeof(*devcd), gfp);
+ if (!devcd)
+ return NULL;
+
+ devcd->owner = owner;
+ devcd->data = data;
+ devcd->datalen = datalen;
+ devcd->read = read;
+ devcd->free = free;
+ devcd->failing_dev = get_device(dev);
+ devcd->deleted = false;
+
+ mutex_init(&devcd->mutex);
+ device_initialize(&devcd->devcd_dev);
+
+ dev_set_name(&devcd->devcd_dev, "devcd%d",
+ atomic_inc_return(&devcd_count));
+ devcd->devcd_dev.class = &devcd_class;
+
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ return devcd;
+}
+
/**
* dev_coredumpm_timeout - create device coredump with read/free methods with a
* custom timeout.
@@ -364,7 +398,6 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
void (*free)(void *data),
unsigned long timeout)
{
- static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
struct device *existing;
@@ -381,27 +414,10 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
if (!try_module_get(owner))
goto free;
- devcd = kzalloc(sizeof(*devcd), gfp);
+ devcd = dev_coredumpm_init(dev, owner, data, datalen, gfp, read, free);
if (!devcd)
goto put_module;
- devcd->owner = owner;
- devcd->data = data;
- devcd->datalen = datalen;
- devcd->read = read;
- devcd->free = free;
- devcd->failing_dev = get_device(dev);
- devcd->deleted = false;
-
- mutex_init(&devcd->mutex);
- device_initialize(&devcd->devcd_dev);
-
- dev_set_name(&devcd->devcd_dev, "devcd%d",
- atomic_inc_return(&devcd_count));
- devcd->devcd_dev.class = &devcd_class;
-
- dev_set_uevent_suppress(&devcd->devcd_dev, true);
-
/* devcd->mutex prevents devcd_del() completing until init finishes */
mutex_lock(&devcd->mutex);
devcd->init_completed = false;
On Fri, Jan 09, 2026 at 12:26:55AM +0100, Marco Elver wrote:
> Probably the most idiomatic option is to just factor out construction.
> Clearly separating complex object construction from use also helps
> readability regardless, esp. where concurrency is involved. We could
> document such advice somewhere.
Initializing and locking a mutex (or spinlock, or other primitive) is a
not too unusual pattern, often used when inserting an object into a
hash table or other lookup data structure. So supporting it without
creating pointless wrapper functions would be really useful. One thing
that would be nice to have and probably help here is to have lock
initializers that create the lock in a held state.
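A minimal sketch of that init-then-lock-then-publish pattern
(hypothetical names; locking of the lookup structure itself is omitted
for brevity):

  #include <linux/list.h>
  #include <linux/mutex.h>

  struct obj {
          struct hlist_node node;
          struct mutex lock;
          int state __guarded_by(&lock);
  };

  static void obj_create_and_publish(struct obj *o, struct hlist_head *table)
  {
          mutex_init(&o->lock);
          mutex_lock(&o->lock);            /* hold the lock before the object is visible */
          hlist_add_head(&o->node, table);
          o->state = 1;                    /* finish initialization under the lock */
          mutex_unlock(&o->lock);          /* concurrent users may proceed from here */
  }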
On Fri, Jan 09, 2026 at 07:02AM +0100, Christoph Hellwig wrote:
> On Fri, Jan 09, 2026 at 12:26:55AM +0100, Marco Elver wrote:
> > Probably the most idiomatic option is to just factor out construction.
> > Clearly separating complex object construction from use also helps
> > readability regardless, esp. where concurrency is involved. We could
> > document such advice somewhere.
>
> Initializing and locking a mutex (or spinlock, or other primitive) is a
> not too unusual pattern, often used when inserting an object into a
> hash table or other lookup data structure. So supporting it without
> creating pointless wrapper functions would be really useful. One thing
> that would be nice to have and probably help here is to have lock
> initializers that create the lock in a held state.
Fair point. Without new APIs, we can fix it with the below patch;
essentially "promoting" the context lock to "reentrant" within the
initialization scope. It's not exactly well documented on the Clang
side, but it is a side-effect of how reentrancy works in the analysis:
https://github.com/llvm/llvm-project/pull/175267
------ >8 ------
From 9c9b521b286f241f849dcc4f9efbd9582dabd3cc Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Sat, 10 Jan 2026 00:47:35 +0100
Subject: [PATCH] compiler-context-analysis: Support immediate acquisition
after initialization
When a lock is initialized (e.g. mutex_init()), we assume/assert that
the context lock is held to allow initialization of guarded members
within the same scope.
However, this previously prevented actually acquiring the lock within
that same scope, as the analyzer would report a double-lock warning:
mutex_init(&mtx);
...
mutex_lock(&mtx); // acquiring mutex 'mtx' that is already held
To fix (without new init+lock APIs), we can tell the analysis to treat
the "held" context lock resulting from initialization as reentrant,
allowing subsequent acquisitions to succeed.
To do so *only* within the initialization scope, we can cast the lock
pointer to any reentrant type for the init assume/assert. Introduce a
generic reentrant context lock type `struct __ctx_lock_init` and add
`__inits_ctx_lock()` that casts the lock pointer to this type before
assuming/asserting it.
This ensures that the initial "held" state is reentrant, allowing
patterns like:
mutex_init(&lock);
...
mutex_lock(&lock);
to compile without false positives, and avoids having to make all
context lock types reentrant outside an initialization scope.
The caveat is that real double-lock bugs right after initialization (in
the same scope) may be missed. However, this is the classic trade-off of
avoiding false positives at the cost of (unlikely) false negatives.
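For example (hypothetical snippet), a real double-lock within that same
scope goes unflagged by the analysis (lockdep can still catch it at
runtime):

	mutex_init(&mtx);
	mutex_lock(&mtx);
	mutex_lock(&mtx);	/* deadlock, but no analysis warning here */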
Link: https://lore.kernel.org/all/57062131-e79e-42c2-aa0b-8f931cb8cac2@acm.org/
Reported-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Marco Elver <elver@google.com>
---
include/linux/compiler-context-analysis.h | 12 ++++++++++++
include/linux/local_lock_internal.h | 6 +++---
include/linux/mutex.h | 2 +-
include/linux/rwlock.h | 4 ++--
include/linux/rwlock_rt.h | 2 +-
include/linux/rwsem.h | 4 ++--
include/linux/seqlock.h | 2 +-
include/linux/spinlock.h | 8 ++++----
include/linux/spinlock_rt.h | 2 +-
include/linux/ww_mutex.h | 2 +-
lib/test_context-analysis.c | 3 +++
11 files changed, 31 insertions(+), 16 deletions(-)
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index db7e0d48d8f2..e056cd6e8aaa 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -43,6 +43,14 @@
# define __assumes_ctx_lock(...) __attribute__((assert_capability(__VA_ARGS__)))
# define __assumes_shared_ctx_lock(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
+/*
+ * Generic reentrant context lock type that we cast to when initializing context
+ * locks with __assumes_ctx_lock(), so that we can support guarded member
+ * initialization, but also immediate use after initialization.
+ */
+struct __ctx_lock_type(init_generic) __reentrant_ctx_lock __ctx_lock_init;
+# define __inits_ctx_lock(var) __assumes_ctx_lock((const struct __ctx_lock_init *)(var))
+
/**
* __guarded_by - struct member and globals attribute, declares variable
* only accessible within active context
@@ -120,6 +128,8 @@
__attribute__((overloadable)) __assumes_ctx_lock(var) { } \
static __always_inline void __assume_shared_ctx_lock(const struct name *var) \
__attribute__((overloadable)) __assumes_shared_ctx_lock(var) { } \
+ static __always_inline void __init_ctx_lock(const struct name *var) \
+ __attribute__((overloadable)) __inits_ctx_lock(var) { } \
struct name
/**
@@ -162,6 +172,7 @@
# define __releases_shared_ctx_lock(...)
# define __assumes_ctx_lock(...)
# define __assumes_shared_ctx_lock(...)
+# define __inits_ctx_lock(var)
# define __returns_ctx_lock(var)
# define __guarded_by(...)
# define __pt_guarded_by(...)
@@ -176,6 +187,7 @@
# define __release_shared_ctx_lock(var) do { } while (0)
# define __assume_ctx_lock(var) do { (void)(var); } while (0)
# define __assume_shared_ctx_lock(var) do { (void)(var); } while (0)
+# define __init_ctx_lock(var) do { (void)(var); } while (0)
# define context_lock_struct(name, ...) struct __VA_ARGS__ name
# define disable_context_analysis()
# define enable_context_analysis()
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index e8c4803d8db4..36b8628d09fd 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -86,13 +86,13 @@ do { \
0, LD_WAIT_CONFIG, LD_WAIT_INV, \
LD_LOCK_PERCPU); \
local_lock_debug_init(lock); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#define __local_trylock_init(lock) \
do { \
__local_lock_init((local_lock_t *)lock); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#define __spinlock_nested_bh_init(lock) \
@@ -104,7 +104,7 @@ do { \
0, LD_WAIT_CONFIG, LD_WAIT_INV, \
LD_LOCK_NORMAL); \
local_lock_debug_init(lock); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#define __local_lock_acquire(lock) \
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 89977c215cbd..5d2ef75c4fdb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -62,7 +62,7 @@ do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
- __assume_ctx_lock(mutex); \
+ __init_ctx_lock(mutex); \
} while (0)
/**
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 65a5b55e1bcd..7e171634d2c4 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -22,11 +22,11 @@ do { \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#else
# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); __init_ctx_lock(lock); } while (0)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 37b387dcab21..1e087a6ce2cf 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -22,7 +22,7 @@ do { \
\
init_rwbase_rt(&(rwl)->rwbase); \
__rt_rwlock_init(rwl, #rwl, &__key); \
- __assume_ctx_lock(rwl); \
+ __init_ctx_lock(rwl); \
} while (0)
extern void rt_read_lock(rwlock_t *rwlock) __acquires_shared(rwlock);
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8da14a08a4e1..6ea7d2a23580 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -121,7 +121,7 @@ do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
- __assume_ctx_lock(sem); \
+ __init_ctx_lock(sem); \
} while (0)
/*
@@ -175,7 +175,7 @@ do { \
static struct lock_class_key __key; \
\
__init_rwsem((sem), #sem, &__key); \
- __assume_ctx_lock(sem); \
+ __init_ctx_lock(sem); \
} while (0)
static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 113320911a09..a0670adb4b6e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -816,7 +816,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
do { \
spin_lock_init(&(sl)->lock); \
seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
- __assume_ctx_lock(sl); \
+ __init_ctx_lock(sl); \
} while (0)
/**
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 396b8c5d6c1b..e50372a5f7d1 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -106,12 +106,12 @@ do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#else
# define raw_spin_lock_init(lock) \
- do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); __assume_ctx_lock(lock); } while (0)
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); __init_ctx_lock(lock); } while (0)
#endif
#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
@@ -324,7 +324,7 @@ do { \
\
__raw_spin_lock_init(spinlock_check(lock), \
#lock, &__key, LD_WAIT_CONFIG); \
- __assume_ctx_lock(lock); \
+ __init_ctx_lock(lock); \
} while (0)
#else
@@ -333,7 +333,7 @@ do { \
do { \
spinlock_check(_lock); \
*(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
- __assume_ctx_lock(_lock); \
+ __init_ctx_lock(_lock); \
} while (0)
#endif
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 0a585768358f..154d7290bd99 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -20,7 +20,7 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
do { \
rt_mutex_base_init(&(slock)->lock); \
__rt_spin_lock_init(slock, name, key, percpu); \
- __assume_ctx_lock(slock); \
+ __init_ctx_lock(slock); \
} while (0)
#define _spin_lock_init(slock, percpu) \
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 58e959ee10e9..ecb5564ee70d 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -107,7 +107,7 @@ context_lock_struct(ww_acquire_ctx) {
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
- __assumes_ctx_lock(lock)
+ __inits_ctx_lock(lock)
{
ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;
diff --git a/lib/test_context-analysis.c b/lib/test_context-analysis.c
index 1c5a381461fc..2f733b5cc650 100644
--- a/lib/test_context-analysis.c
+++ b/lib/test_context-analysis.c
@@ -165,6 +165,9 @@ static void __used test_mutex_init(struct test_mutex_data *d)
{
mutex_init(&d->mtx);
d->counter = 0;
+
+ mutex_lock(&d->mtx);
+ mutex_unlock(&d->mtx);
}
static void __used test_mutex_lock(struct test_mutex_data *d)
--
2.52.0.457.g6b5491de43-goog
On Fri, 9 Jan 2026 07:02:49 +0100 Christoph Hellwig <hch@lst.de> wrote:
> On Fri, Jan 09, 2026 at 12:26:55AM +0100, Marco Elver wrote:
> > Probably the most idiomatic option is to just factor out construction.
> > Clearly separating complex object construction from use also helps
> > readability regardless, esp. where concurrency is involved. We could
> > document such advice somewhere.
>
> Initializing and locking a mutex (or spinlock, or other primitive) is a
> not too unusual pattern, often used when inserting an object into a
> hash table or other lookup data structure. So supporting it without
> creating pointless wrapper functions would be really useful. One thing
> that would be nice to have and probably help here is to have lock
> initializers that create the lock in a held state.
Right. If tooling can't handle the simple pattern of initializing a lock
and then taking it, that's a hard show stopper for adding that tooling.
-- Steve
The following commit has been merged into the locking/core branch of tip:
Commit-ID: 370f0a345a70fe36d0185abf87c7ee8e70572e06
Gitweb: https://git.kernel.org/tip/370f0a345a70fe36d0185abf87c7ee8e70572e06
Author: Marco Elver <elver@google.com>
AuthorDate: Fri, 19 Dec 2025 16:39:59 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 05 Jan 2026 16:43:29 +01:00
locking/mutex: Support Clang's context analysis
Add support for Clang's context analysis for mutex.
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-11-elver@google.com
---
Documentation/dev-tools/context-analysis.rst | 2 +-
include/linux/mutex.h | 38 ++++++-----
include/linux/mutex_types.h | 4 +-
lib/test_context-analysis.c | 64 +++++++++++++++++++-
4 files changed, 90 insertions(+), 18 deletions(-)