[PATCH] locking/osq_lock: Use try_cmpxchg() family of functions instead of cmpxchg()
Posted by Uros Bizjak 1 year, 4 months ago
Replace this pattern in osq_lock.c:

    cmpxchg(*ptr, old, new) == old

... with the simpler and faster:

    try_cmpxchg(*ptr, &old, new)

The x86 CMPXCHG instruction returns success in the ZF flag, so this change
saves a compare after the CMPXCHG.  As an example, the code in osq_unlock()
improves from:

 11b:	31 c9                	xor    %ecx,%ecx
 11d:	8d 50 01             	lea    0x1(%rax),%edx
 120:	89 d0                	mov    %edx,%eax
 122:	f0 0f b1 0f          	lock cmpxchg %ecx,(%rdi)
 126:	39 c2                	cmp    %eax,%edx
 128:	75 05                	jne    12f <...>

to:

 12b:	31 d2                	xor    %edx,%edx
 12d:	83 c0 01             	add    $0x1,%eax
 130:	f0 0f b1 17          	lock cmpxchg %edx,(%rdi)
 134:	75 05                	jne    13b <...>
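
For illustration only (not part of this patch), the same transformation
applied to a generic increment loop on a hypothetical atomic_t v.  On
failure, try_cmpxchg() writes the value it actually observed back through
&old, so the manual reload and re-compare disappear:

    /* cmpxchg() form: compare and reload by hand on failure. */
    int old = atomic_read(&v);

    for (;;) {
        int prev = atomic_cmpxchg(&v, old, old + 1);

        if (prev == old)
            break;
        old = prev;    /* retry with the value we actually observed */
    }

    /* try_cmpxchg() form: a failed attempt updates @old automatically. */
    int old = atomic_read(&v);

    do {
    } while (!atomic_try_cmpxchg(&v, &old, old + 1));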

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
---
 kernel/locking/osq_lock.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 75a6f6133866..4f89ac8e6a69 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -58,8 +58,10 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 	int curr = encode_cpu(smp_processor_id());
 
 	for (;;) {
-		if (atomic_read(&lock->tail) == curr &&
-		    atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr) {
+		int tmp = curr;
+
+		if (atomic_read(&lock->tail) == tmp &&
+		    atomic_try_cmpxchg_acquire(&lock->tail, &tmp, old_cpu)) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
@@ -157,12 +159,14 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 */
 
 	for (;;) {
+		struct optimistic_spin_node *tmp = node;
+
 		/*
 		 * cpu_relax() below implies a compiler barrier which would
 		 * prevent this comparison being optimized away.
 		 */
-		if (data_race(prev->next) == node &&
-		    cmpxchg(&prev->next, node, NULL) == node)
+		if (data_race(prev->next) == tmp &&
+		    try_cmpxchg(&prev->next, &tmp, NULL))
 			break;
 
 		/*
@@ -215,8 +219,7 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
-					  OSQ_UNLOCKED_VAL) == curr))
+	if (atomic_try_cmpxchg_release(&lock->tail, &curr, OSQ_UNLOCKED_VAL))
 		return;
 
 	/*
-- 
2.42.0
Re: [PATCH] locking/osq_lock: Use try_cmpxchg() family of functions instead of cmpxchg()
Posted by Uros Bizjak 1 year, 4 months ago
Dear locking maintainers,

I would like to ping this patch. The patch [1] brings several benefits:
- faster and more compact code for the x86 target (as demonstrated in
the commit message)
- better code for other targets as well (due to correct likely()/unlikely()
usage)
- prevents logic errors from comparing against the wrong variable (see
the sketch below)

[1] https://lore.kernel.org/lkml/20240719112130.59260-1-ubizjak@gmail.com/
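
To illustrate the last point with a hypothetical snippet (prev, node and
next are made up for the example, not taken from osq_lock.c): cmpxchg()
mentions the expected value twice, so the success check can silently
compare against the wrong variable, while try_cmpxchg() names it exactly
once:

    /* Bug-prone: 'node' is passed as the expected value, but the result
       is compared against 'next' -- this compiles and mis-reports
       success/failure. */
    if (cmpxchg(&prev->next, node, NULL) == next)
        ...

    /* try_cmpxchg() takes the expected value once, by address, so the
       compare cannot target the wrong variable. */
    if (try_cmpxchg(&prev->next, &node, NULL))
        ...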

Thanks,
Uros.

On Fri, Jul 19, 2024 at 1:21 PM Uros Bizjak <ubizjak@gmail.com> wrote:
> [full patch quoted above, trimmed]