rust/kernel/sync/lock.rs | 7 +++++++ rust/kernel/sync/lock/global.rs | 2 ++ rust/kernel/sync/lock/mutex.rs | 5 +++++ rust/kernel/sync/lock/spinlock.rs | 5 +++++ 4 files changed, 19 insertions(+)
While debugging a different issue [1], I inspected a rust_binder.ko file
and noticed the following relocation:
R_AARCH64_CALL26 _RNvXNtNtNtCsdfZWD8DztAw_6kernel4sync4lock8spinlockNtB2_15SpinLockBackendNtB4_7Backend6unlock
This relocation (and a similar one for lock) occurred many times
throughout the module. That is not really useful because all this
function does is call spin_unlock(), so what we actually want here is
that a call to spin_unlock() directly is generated in favor of this
wrapper method.
Thus, mark these methods inline.
Link: https://lore.kernel.org/p/20251111-binder-fix-list-remove-v1-0-8ed14a0da63d@google.com
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
---
Changes in v2:
- Rebased on v6.19-rc1.
- Link to v1: https://lore.kernel.org/r/20251113-inline-lock-unlock-v1-1-1b6e8c323bcf@google.com
---
rust/kernel/sync/lock.rs | 7 +++++++
rust/kernel/sync/lock/global.rs | 2 ++
rust/kernel/sync/lock/mutex.rs | 5 +++++
rust/kernel/sync/lock/spinlock.rs | 5 +++++
4 files changed, 19 insertions(+)
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 46a57d1fc309d330a4c82fe1ecce18297205faed..10b6b5e9b024f2d3ee7458fc2f64190bd38f4391 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -156,6 +156,7 @@ impl<B: Backend> Lock<(), B> {
/// the whole lifetime of `'a`.
///
/// [`State`]: Backend::State
+ #[inline]
pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
// SAFETY:
// - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
@@ -169,6 +170,7 @@ pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
+ #[inline]
pub fn lock(&self) -> Guard<'_, T, B> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -182,6 +184,7 @@ pub fn lock(&self) -> Guard<'_, T, B> {
/// Returns a guard that can be used to access the data protected by the lock if successful.
// `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
#[must_use = "if unused, the lock will be immediately unlocked"]
+ #[inline]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -275,6 +278,7 @@ pub fn as_mut(&mut self) -> Pin<&mut T> {
impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
type Target = T;
+ #[inline]
fn deref(&self) -> &Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &*self.lock.data.get() }
@@ -285,6 +289,7 @@ impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
where
T: Unpin,
{
+ #[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &mut *self.lock.data.get() }
@@ -292,6 +297,7 @@ fn deref_mut(&mut self) -> &mut Self::Target {
}
impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ #[inline]
fn drop(&mut self) {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -304,6 +310,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
+ #[inline]
pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
// SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
unsafe { B::assert_is_held(lock.state.get()) };
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
index eab48108a4aebeb72b2026d6c21d5748b0e62f0a..aecbdc34738fbd746de79eb0f1abe995478ad785 100644
--- a/rust/kernel/sync/lock/global.rs
+++ b/rust/kernel/sync/lock/global.rs
@@ -77,6 +77,7 @@ pub unsafe fn init(&'static self) {
}
/// Lock this global lock.
+ #[inline]
pub fn lock(&'static self) -> GlobalGuard<B> {
GlobalGuard {
inner: self.inner.lock(),
@@ -84,6 +85,7 @@ pub fn lock(&'static self) -> GlobalGuard<B> {
}
/// Try to lock this global lock.
+ #[inline]
pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
Some(GlobalGuard {
inner: self.inner.try_lock()?,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 581cee7ab842ad62ec144e67138676c000a3f5e4..cda0203efefb9fcb32c7eab28721e8678ccec575 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -102,6 +102,7 @@ unsafe impl super::Backend for MutexBackend {
type State = bindings::mutex;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -112,18 +113,21 @@ unsafe fn init(
unsafe { bindings::__mutex_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::mutex_lock(ptr) };
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::mutex_trylock(ptr) };
@@ -135,6 +139,7 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::mutex_assert_is_held(ptr) }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index d7be38ccbdc7dc4d70caaed0e7088f59f65fc6d1..ef76fa07ca3a2b5e32e956e828be5b295da0bc28 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -101,6 +101,7 @@ unsafe impl super::Backend for SpinLockBackend {
type State = bindings::spinlock_t;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -111,18 +112,21 @@ unsafe fn init(
unsafe { bindings::__spin_lock_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::spin_lock(ptr) }
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::spin_trylock(ptr) };
@@ -134,6 +138,7 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::spin_assert_is_held(ptr) }
---
base-commit: 8f0b4cce4481fb22653697cced8d0d04027cb1e8
change-id: 20251113-inline-lock-unlock-b1726632a99d
Best regards,
--
Alice Ryhl <aliceryhl@google.com>
On Thu, Dec 18, 2025 at 12:10:23PM +0000, Alice Ryhl wrote: > While debugging a different issue [1], I inspected a rust_binder.ko file > and noticed the following relocation: > > R_AARCH64_CALL26 _RNvXNtNtNtCsdfZWD8DztAw_6kernel4sync4lock8spinlockNtB2_15SpinLockBackendNtB4_7Backend6unlock > > This relocation (and a similar one for lock) occurred many times > throughout the module. That is not really useful because all this > function does is call spin_unlock(), so what we actually want here is > that a call to spin_unlock() dirctly is generated in favor of this > wrapper method. > > Thus, mark these methods inline. > > Link: https://lore.kernel.org/p/20251111-binder-fix-list-remove-v1-0-8ed14a0da63d@google.com > Signed-off-by: Alice Ryhl <aliceryhl@google.com> > --- > Changes in v2: > - Rebased on v6.19-rc1. > - Link to v1: https://lore.kernel.org/r/20251113-inline-lock-unlock-v1-1-1b6e8c323bcf@google.com Queued, thank you! Regards, Boqun
On Thu, 18 Dec 2025 12:10:23 +0000 Alice Ryhl <aliceryhl@google.com> wrote: > While debugging a different issue [1], I inspected a rust_binder.ko file > and noticed the following relocation: > > R_AARCH64_CALL26 _RNvXNtNtNtCsdfZWD8DztAw_6kernel4sync4lock8spinlockNtB2_15SpinLockBackendNtB4_7Backend6unlock > > This relocation (and a similar one for lock) occurred many times > throughout the module. That is not really useful because all this > function does is call spin_unlock(), so what we actually want here is > that a call to spin_unlock() dirctly is generated in favor of this > wrapper method. > > Thus, mark these methods inline. > > Link: https://lore.kernel.org/p/20251111-binder-fix-list-remove-v1-0-8ed14a0da63d@google.com > Signed-off-by: Alice Ryhl <aliceryhl@google.com> Reviewed-by: Gary Guo <gary@garyguo.net> Best, Gary > --- > Changes in v2: > - Rebased on v6.19-rc1. > - Link to v1: https://lore.kernel.org/r/20251113-inline-lock-unlock-v1-1-1b6e8c323bcf@google.com > --- > rust/kernel/sync/lock.rs | 7 +++++++ > rust/kernel/sync/lock/global.rs | 2 ++ > rust/kernel/sync/lock/mutex.rs | 5 +++++ > rust/kernel/sync/lock/spinlock.rs | 5 +++++ > 4 files changed, 19 insertions(+)
> On 18 Dec 2025, at 09:10, Alice Ryhl <aliceryhl@google.com> wrote:
>
> While debugging a different issue [1], I inspected a rust_binder.ko file
> and noticed the following relocation:
>
> R_AARCH64_CALL26 _RNvXNtNtNtCsdfZWD8DztAw_6kernel4sync4lock8spinlockNtB2_15SpinLockBackendNtB4_7Backend6unlock
>
> This relocation (and a similar one for lock) occurred many times
> throughout the module. That is not really useful because all this
> function does is call spin_unlock(), so what we actually want here is
> that a call to spin_unlock() dirctly is generated in favor of this
> wrapper method.
>
> Thus, mark these methods inline.
>
> Link: https://lore.kernel.org/p/20251111-binder-fix-list-remove-v1-0-8ed14a0da63d@google.com
> Signed-off-by: Alice Ryhl <aliceryhl@google.com>
> ---
> Changes in v2:
> - Rebased on v6.19-rc1.
> - Link to v1: https://lore.kernel.org/r/20251113-inline-lock-unlock-v1-1-1b6e8c323bcf@google.com
> ---
> rust/kernel/sync/lock.rs | 7 +++++++
> rust/kernel/sync/lock/global.rs | 2 ++
> rust/kernel/sync/lock/mutex.rs | 5 +++++
> rust/kernel/sync/lock/spinlock.rs | 5 +++++
> 4 files changed, 19 insertions(+)
>
> diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
> index 46a57d1fc309d330a4c82fe1ecce18297205faed..10b6b5e9b024f2d3ee7458fc2f64190bd38f4391 100644
> --- a/rust/kernel/sync/lock.rs
> +++ b/rust/kernel/sync/lock.rs
> @@ -156,6 +156,7 @@ impl<B: Backend> Lock<(), B> {
> /// the whole lifetime of `'a`.
> ///
> /// [`State`]: Backend::State
> + #[inline]
> pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
> // SAFETY:
> // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
> @@ -169,6 +170,7 @@ pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
>
> impl<T: ?Sized, B: Backend> Lock<T, B> {
> /// Acquires the lock and gives the caller access to the data protected by it.
> + #[inline]
> pub fn lock(&self) -> Guard<'_, T, B> {
> // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
> // that `init` was called.
> @@ -182,6 +184,7 @@ pub fn lock(&self) -> Guard<'_, T, B> {
> /// Returns a guard that can be used to access the data protected by the lock if successful.
> // `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
> #[must_use = "if unused, the lock will be immediately unlocked"]
> + #[inline]
> pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
> // SAFETY: The constructor of the type calls `init`, so the existence of the object proves
> // that `init` was called.
> @@ -275,6 +278,7 @@ pub fn as_mut(&mut self) -> Pin<&mut T> {
> impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
> type Target = T;
>
> + #[inline]
> fn deref(&self) -> &Self::Target {
> // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
> unsafe { &*self.lock.data.get() }
> @@ -285,6 +289,7 @@ impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
> where
> T: Unpin,
> {
> + #[inline]
> fn deref_mut(&mut self) -> &mut Self::Target {
> // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
> unsafe { &mut *self.lock.data.get() }
> @@ -292,6 +297,7 @@ fn deref_mut(&mut self) -> &mut Self::Target {
> }
>
> impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
> + #[inline]
> fn drop(&mut self) {
> // SAFETY: The caller owns the lock, so it is safe to unlock it.
> unsafe { B::unlock(self.lock.state.get(), &self.state) };
> @@ -304,6 +310,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
> /// # Safety
> ///
> /// The caller must ensure that it owns the lock.
> + #[inline]
> pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
> // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
> unsafe { B::assert_is_held(lock.state.get()) };
> diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
> index eab48108a4aebeb72b2026d6c21d5748b0e62f0a..aecbdc34738fbd746de79eb0f1abe995478ad785 100644
> --- a/rust/kernel/sync/lock/global.rs
> +++ b/rust/kernel/sync/lock/global.rs
> @@ -77,6 +77,7 @@ pub unsafe fn init(&'static self) {
> }
>
> /// Lock this global lock.
> + #[inline]
> pub fn lock(&'static self) -> GlobalGuard<B> {
> GlobalGuard {
> inner: self.inner.lock(),
> @@ -84,6 +85,7 @@ pub fn lock(&'static self) -> GlobalGuard<B> {
> }
>
> /// Try to lock this global lock.
> + #[inline]
> pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
> Some(GlobalGuard {
> inner: self.inner.try_lock()?,
> diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
> index 581cee7ab842ad62ec144e67138676c000a3f5e4..cda0203efefb9fcb32c7eab28721e8678ccec575 100644
> --- a/rust/kernel/sync/lock/mutex.rs
> +++ b/rust/kernel/sync/lock/mutex.rs
> @@ -102,6 +102,7 @@ unsafe impl super::Backend for MutexBackend {
> type State = bindings::mutex;
> type GuardState = ();
>
> + #[inline]
> unsafe fn init(
> ptr: *mut Self::State,
> name: *const crate::ffi::c_char,
> @@ -112,18 +113,21 @@ unsafe fn init(
> unsafe { bindings::__mutex_init(ptr, name, key) }
> }
>
> + #[inline]
> unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
> // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
> // memory, and that it has been initialised before.
> unsafe { bindings::mutex_lock(ptr) };
> }
>
> + #[inline]
> unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
> // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
> // caller is the owner of the mutex.
> unsafe { bindings::mutex_unlock(ptr) };
> }
>
> + #[inline]
> unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
> // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
> let result = unsafe { bindings::mutex_trylock(ptr) };
> @@ -135,6 +139,7 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
> }
> }
>
> + #[inline]
> unsafe fn assert_is_held(ptr: *mut Self::State) {
> // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
> unsafe { bindings::mutex_assert_is_held(ptr) }
> diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
> index d7be38ccbdc7dc4d70caaed0e7088f59f65fc6d1..ef76fa07ca3a2b5e32e956e828be5b295da0bc28 100644
> --- a/rust/kernel/sync/lock/spinlock.rs
> +++ b/rust/kernel/sync/lock/spinlock.rs
> @@ -101,6 +101,7 @@ unsafe impl super::Backend for SpinLockBackend {
> type State = bindings::spinlock_t;
> type GuardState = ();
>
> + #[inline]
> unsafe fn init(
> ptr: *mut Self::State,
> name: *const crate::ffi::c_char,
> @@ -111,18 +112,21 @@ unsafe fn init(
> unsafe { bindings::__spin_lock_init(ptr, name, key) }
> }
>
> + #[inline]
> unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
> // SAFETY: The safety requirements of this function ensure that `ptr` points to valid
> // memory, and that it has been initialised before.
> unsafe { bindings::spin_lock(ptr) }
> }
>
> + #[inline]
> unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
> // SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
> // caller is the owner of the spinlock.
> unsafe { bindings::spin_unlock(ptr) }
> }
>
> + #[inline]
> unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
> // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
> let result = unsafe { bindings::spin_trylock(ptr) };
> @@ -134,6 +138,7 @@ unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
> }
> }
>
> + #[inline]
> unsafe fn assert_is_held(ptr: *mut Self::State) {
> // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
> unsafe { bindings::spin_assert_is_held(ptr) }
>
> ---
> base-commit: 8f0b4cce4481fb22653697cced8d0d04027cb1e8
> change-id: 20251113-inline-lock-unlock-b1726632a99d
>
> Best regards,
> --
> Alice Ryhl <aliceryhl@google.com>
>
>
I wonder if _everything_ has to be inlined, i.e.: even things like assert_is_held(), init() and etc.
In any case,
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
The following commit has been merged into the locking/core branch of tip:
Commit-ID: ccf9e070116a81d29aae30db501d562c8efd1ed8
Gitweb: https://git.kernel.org/tip/ccf9e070116a81d29aae30db501d562c8efd1ed8
Author: Alice Ryhl <aliceryhl@google.com>
AuthorDate: Thu, 18 Dec 2025 12:10:23
Committer: Boqun Feng <boqun.feng@gmail.com>
CommitterDate: Sat, 10 Jan 2026 10:53:46 +08:00
rust: sync: Inline various lock related methods
While debugging a different issue [1], the following relocation was
noticed in the rust_binder.ko file:
R_AARCH64_CALL26 _RNvXNtNtNtCsdfZWD8DztAw_6kernel4sync4lock8spinlockNtB2_15SpinLockBackendNtB4_7Backend6unlock
This relocation (and a similar one for lock) occurred many times
throughout the module. That is not really useful because all this
function does is call spin_unlock(), so what we actually want here is
that a call to spin_unlock() directly is generated in favor of this
wrapper method.
Thus, mark these methods inline.
[boqun: Reword the commit message a bit]
Link: https://lore.kernel.org/p/20251111-binder-fix-list-remove-v1-0-8ed14a0da63d@google.com
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Reviewed-by: Gary Guo <gary@garyguo.net>
Reviewed-by: Daniel Almeida <daniel.almeida@collabora.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://patch.msgid.link/20251218-inline-lock-unlock-v2-1-fbadac8bd61b@google.com
---
rust/kernel/sync/lock.rs | 7 +++++++
rust/kernel/sync/lock/global.rs | 2 ++
rust/kernel/sync/lock/mutex.rs | 5 +++++
rust/kernel/sync/lock/spinlock.rs | 5 +++++
4 files changed, 19 insertions(+)
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index 46a57d1..10b6b5e 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -156,6 +156,7 @@ impl<B: Backend> Lock<(), B> {
/// the whole lifetime of `'a`.
///
/// [`State`]: Backend::State
+ #[inline]
pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
// SAFETY:
// - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
@@ -169,6 +170,7 @@ impl<B: Backend> Lock<(), B> {
impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Acquires the lock and gives the caller access to the data protected by it.
+ #[inline]
pub fn lock(&self) -> Guard<'_, T, B> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -182,6 +184,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// Returns a guard that can be used to access the data protected by the lock if successful.
// `Option<T>` is not `#[must_use]` even if `T` is, thus the attribute is needed here.
#[must_use = "if unused, the lock will be immediately unlocked"]
+ #[inline]
pub fn try_lock(&self) -> Option<Guard<'_, T, B>> {
// SAFETY: The constructor of the type calls `init`, so the existence of the object proves
// that `init` was called.
@@ -275,6 +278,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
impl<T: ?Sized, B: Backend> core::ops::Deref for Guard<'_, T, B> {
type Target = T;
+ #[inline]
fn deref(&self) -> &Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &*self.lock.data.get() }
@@ -285,6 +289,7 @@ impl<T: ?Sized, B: Backend> core::ops::DerefMut for Guard<'_, T, B>
where
T: Unpin,
{
+ #[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
// SAFETY: The caller owns the lock, so it is safe to deref the protected data.
unsafe { &mut *self.lock.data.get() }
@@ -292,6 +297,7 @@ where
}
impl<T: ?Sized, B: Backend> Drop for Guard<'_, T, B> {
+ #[inline]
fn drop(&mut self) {
// SAFETY: The caller owns the lock, so it is safe to unlock it.
unsafe { B::unlock(self.lock.state.get(), &self.state) };
@@ -304,6 +310,7 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
/// # Safety
///
/// The caller must ensure that it owns the lock.
+ #[inline]
pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
// SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
unsafe { B::assert_is_held(lock.state.get()) };
diff --git a/rust/kernel/sync/lock/global.rs b/rust/kernel/sync/lock/global.rs
index eab4810..aecbdc3 100644
--- a/rust/kernel/sync/lock/global.rs
+++ b/rust/kernel/sync/lock/global.rs
@@ -77,6 +77,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Lock this global lock.
+ #[inline]
pub fn lock(&'static self) -> GlobalGuard<B> {
GlobalGuard {
inner: self.inner.lock(),
@@ -84,6 +85,7 @@ impl<B: GlobalLockBackend> GlobalLock<B> {
}
/// Try to lock this global lock.
+ #[inline]
pub fn try_lock(&'static self) -> Option<GlobalGuard<B>> {
Some(GlobalGuard {
inner: self.inner.try_lock()?,
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 581cee7..cda0203 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -102,6 +102,7 @@ unsafe impl super::Backend for MutexBackend {
type State = bindings::mutex;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -112,18 +113,21 @@ unsafe impl super::Backend for MutexBackend {
unsafe { bindings::__mutex_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::mutex_lock(ptr) };
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the mutex.
unsafe { bindings::mutex_unlock(ptr) };
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::mutex_trylock(ptr) };
@@ -135,6 +139,7 @@ unsafe impl super::Backend for MutexBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::mutex_assert_is_held(ptr) }
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index d7be38c..ef76fa0 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -101,6 +101,7 @@ unsafe impl super::Backend for SpinLockBackend {
type State = bindings::spinlock_t;
type GuardState = ();
+ #[inline]
unsafe fn init(
ptr: *mut Self::State,
name: *const crate::ffi::c_char,
@@ -111,18 +112,21 @@ unsafe impl super::Backend for SpinLockBackend {
unsafe { bindings::__spin_lock_init(ptr, name, key) }
}
+ #[inline]
unsafe fn lock(ptr: *mut Self::State) -> Self::GuardState {
// SAFETY: The safety requirements of this function ensure that `ptr` points to valid
// memory, and that it has been initialised before.
unsafe { bindings::spin_lock(ptr) }
}
+ #[inline]
unsafe fn unlock(ptr: *mut Self::State, _guard_state: &Self::GuardState) {
// SAFETY: The safety requirements of this function ensure that `ptr` is valid and that the
// caller is the owner of the spinlock.
unsafe { bindings::spin_unlock(ptr) }
}
+ #[inline]
unsafe fn try_lock(ptr: *mut Self::State) -> Option<Self::GuardState> {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
let result = unsafe { bindings::spin_trylock(ptr) };
@@ -134,6 +138,7 @@ unsafe impl super::Backend for SpinLockBackend {
}
}
+ #[inline]
unsafe fn assert_is_held(ptr: *mut Self::State) {
// SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
unsafe { bindings::spin_assert_is_held(ptr) }
© 2016 - 2026 Red Hat, Inc.