From: David Laight <david.laight.linux@gmail.com>
The vcpu_is_preempted() test stops osq_lock() spinning if a virtual
CPU is no longer running.
Although the test is patched out on bare metal, the code still needs the
CPU number.
Reading this from 'prev->cpu' is pretty much guaranteed to be a cache miss
when osq_unlock() is waking up the next CPU.
Instead, save 'prev->cpu' in 'node->prev_cpu' and use that value.
Update it in the osq_lock() 'unqueue' path when 'node->prev' is changed.
Signed-off-by: David Laight <david.laight.linux@gmail.com>
---
kernel/locking/osq_lock.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 96c6094157b5..0e1c7d11b6c0 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -16,6 +16,7 @@ struct optimistic_spin_node {
struct optimistic_spin_node *next, *prev;
int locked; /* 1 if lock acquired */
int cpu; /* encoded CPU # + 1 value */
+ int prev_cpu; /* encoded CPU # + 1 value */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
@@ -29,9 +30,9 @@ static inline int encode_cpu(int cpu_nr)
return cpu_nr + 1;
}
-static inline int node_cpu(struct optimistic_spin_node *node)
+static inline int prev_cpu_nr(struct optimistic_spin_node *node)
{
- return node->cpu - 1;
+ return READ_ONCE(node->prev_cpu) - 1;
}
static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
@@ -110,6 +111,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
if (old == OSQ_UNLOCKED_VAL)
return true;
+ WRITE_ONCE(node->prev_cpu, old);
prev = decode_cpu(old);
node->prev = prev;
node->locked = 0;
@@ -144,7 +146,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* polling, be careful.
*/
if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
- vcpu_is_preempted(node_cpu(node->prev))))
+ vcpu_is_preempted(prev_cpu_nr(node))))
return true;
/* unqueue */
@@ -201,6 +203,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* it will wait in Step-A.
*/
+ WRITE_ONCE(next->prev_cpu, prev->cpu);
WRITE_ONCE(next->prev, prev);
WRITE_ONCE(prev->next, next);
--
2.39.5
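For readers outside the thread, a minimal userspace sketch of the data
layout and of the access the patch makes cache-local. The struct and the
"+ 1" encoding mirror kernel/locking/osq_lock.c; the nodes[] array and
main() harness are illustrative stand-ins for the kernel's per-CPU data,
not real kernel code.

#include <stdio.h>

/*
 * Stand-in for the kernel's per-CPU, cache-line-aligned osq_node
 * (DEFINE_PER_CPU_SHARED_ALIGNED in the real code).
 */
struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked;	/* 1 if lock acquired */
	int cpu;	/* encoded CPU # + 1 value */
	int prev_cpu;	/* encoded CPU # + 1 value, added by this patch */
} __attribute__((aligned(64)));

static struct optimistic_spin_node nodes[4];	/* one per "CPU" */

/* 0 is reserved for OSQ_UNLOCKED_VAL, so CPU n is encoded as n + 1. */
static int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static int prev_cpu_nr(struct optimistic_spin_node *node)
{
	/*
	 * Before the patch, each poll read node->prev->cpu: a load from
	 * the previous CPU's node, whose cache line its owner keeps
	 * touching, so it is pretty much guaranteed to miss.  Reading
	 * the copy cached in the spinner's own node stays cache-local.
	 */
	return node->prev_cpu - 1;
}

int main(void)
{
	/* Pretend CPU 2 has queued behind CPU 1. */
	struct optimistic_spin_node *node = &nodes[2];

	node->cpu = encode_cpu(2);
	node->prev = &nodes[1];
	node->prev_cpu = encode_cpu(1);	/* snapshot taken at enqueue time */

	printf("previous CPU number: %d\n", prev_cpu_nr(node));
	return 0;
}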
On Fri, 6 Mar 2026 22:51:47 +0000
david.laight.linux@gmail.com wrote:
Apologies to Yafang for mistyping his address...
On Fri, 6 Mar 2026 22:51:47 +0000
david.laight.linux@gmail.com wrote:
Apologies to Yafang for mistyping his address....
(and actually corrected this time - it's getting late)
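As a side note for anyone reading without the tree handy: the poll the
patch touches is smp_cond_load_relaxed(). Its generic fallback in
include/asm-generic/barrier.h behaves roughly like the sketch below;
this is simplified (the real macro uses READ_ONCE() and cpu_relax()),
and the macro name here is made up to avoid clashing with the kernel's.

/*
 * Simplified model of the generic smp_cond_load_relaxed(): re-read
 * *ptr each iteration and stop once cond_expr (which may use VAL,
 * the last value loaded) becomes true.
 *
 * Usage, as in osq_lock():
 *   cond_load_relaxed(&node->locked,
 *                     VAL || need_resched() || vcpu_is_preempted(...));
 */
#define cond_load_relaxed(ptr, cond_expr) ({			\
	__typeof__(*(ptr)) VAL;					\
	for (;;) {						\
		VAL = *(volatile __typeof__(*(ptr)) *)(ptr);	\
		if (cond_expr)					\
			break;					\
		/* the kernel inserts cpu_relax() here */	\
	}							\
	VAL;							\
})

So the loop in osq_lock() keeps re-reading node->locked until it is set,
need_resched() fires, or the previous vCPU is seen preempted; with this
patch the preemption check reads the locally cached node->prev_cpu
instead of chasing node->prev into another CPU's node on every iteration.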