locking/rwlock: Fix write_trylock_irqsave() with CONFIG_INLINE_WRITE_TRYLOCK
Move _raw_write_trylock_irqsave() after the _raw_write_trylock macro to
ensure it uses the inlined version, fixing a linker error when inlining
is enabled. This is the case on s390:
>> ld.lld: error: undefined symbol: _raw_write_trylock
>>> referenced by rwlock_api_smp.h:48 (include/linux/rwlock_api_smp.h:48)
>>> lib/test_context-analysis.o:(test_write_trylock_extra) in archive vmlinux.a
>>> referenced by rwlock_api_smp.h:48 (include/linux/rwlock_api_smp.h:48)
>>> lib/test_context-analysis.o:(test_write_trylock_extra) in archive vmlinux.a
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202602032101.dbxRfsWO-lkp@intel.com/
Signed-off-by: Marco Elver <elver@google.com>
---
include/linux/rwlock_api_smp.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index d903b17c46ca..61a852609eab 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -41,16 +41,6 @@ void __lockfunc
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
-static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
- __cond_acquires(true, lock)
-{
- local_irq_save(*flags);
- if (_raw_write_trylock(lock))
- return true;
- local_irq_restore(*flags);
- return false;
-}
-
#ifdef CONFIG_INLINE_READ_LOCK
#define _raw_read_lock(lock) __raw_read_lock(lock)
#endif
@@ -147,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
--
2.53.0.rc2.204.g2597b5adb4-goog
On 2/3/26 2:50 PM, Marco Elver wrote:
> Move _raw_write_trylock_irqsave() after the _raw_write_trylock macro to
> ensure it uses the inlined version, fixing a linker error when inlining
> is enabled.
> [ ... ]
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202602032101.dbxRfsWO-lkp@intel.com/
> Signed-off-by: Marco Elver <elver@google.com>

Has it been considered to add a Fixes: tag?

Anyway:

Reviewed-by: Bart Van Assche <bvanassche@acm.org>
The following commit has been merged into the locking/core branch of tip:
Commit-ID: 7a562d5d2396c9c78fbbced7ae81bcfcfa0fde3f
Gitweb: https://git.kernel.org/tip/7a562d5d2396c9c78fbbced7ae81bcfcfa0fde3f
Author: Marco Elver <elver@google.com>
AuthorDate: Tue, 03 Feb 2026 23:50:10 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 05 Feb 2026 09:43:48 +01:00
locking/rwlock: Fix write_trylock_irqsave() with CONFIG_INLINE_WRITE_TRYLOCK
Move _raw_write_trylock_irqsave() after the _raw_write_trylock macro to
ensure it uses the inlined version, fixing a linker error when inlining
is enabled. This is the case on s390:
>> ld.lld: error: undefined symbol: _raw_write_trylock
>>> referenced by rwlock_api_smp.h:48 (include/linux/rwlock_api_smp.h:48)
>>> lib/test_context-analysis.o:(test_write_trylock_extra) in archive vmlinux.a
>>> referenced by rwlock_api_smp.h:48 (include/linux/rwlock_api_smp.h:48)
>>> lib/test_context-analysis.o:(test_write_trylock_extra) in archive vmlinux.a
Closes: https://lore.kernel.org/oe-kbuild-all/202602032101.dbxRfsWO-lkp@intel.com/
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Link: https://patch.msgid.link/20260203225114.3493538-1-elver@google.com
---
include/linux/rwlock_api_smp.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index d903b17..61a8526 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -41,16 +41,6 @@ void __lockfunc
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
-static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
- __cond_acquires(true, lock)
-{
- local_irq_save(*flags);
- if (_raw_write_trylock(lock))
- return true;
- local_irq_restore(*flags);
- return false;
-}
-
#ifdef CONFIG_INLINE_READ_LOCK
#define _raw_read_lock(lock) __raw_read_lock(lock)
#endif
@@ -147,6 +137,16 @@ static inline int __raw_write_trylock(rwlock_t *lock)
return 0;
}
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+ __cond_acquires(true, lock) __no_context_analysis
+{
+ local_irq_save(*flags);
+ if (_raw_write_trylock(lock))
+ return true;
+ local_irq_restore(*flags);
+ return false;
+}
+
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are