To support the NUMA-aware osq lock, struct optimistic_spin_queue is
accessed through a union as three members, numa_enable, index and
tail16, in addition to the original tail. The size of the struct is
unchanged. When the dynamic NUMA-aware lock is enabled, osq_lock()
turns to the crossing variant, x_osq_lock(), which checks the
contention level and starts the dynamic switch.
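
For illustration, a minimal user-space sketch of how such a union
overlays the 32-bit tail word (an approximation: atomic_t is
simplified to a plain uint32_t, the stored values are made up, and
only the little-endian layout is shown):

#include <stdint.h>
#include <stdio.h>

struct optimistic_spin_queue {
	union {
		uint32_t val;                /* the whole 32-bit word */
		struct {                     /* __LITTLE_ENDIAN layout */
			uint16_t tail16;     /* encoded CPU # of the tail */
			uint8_t  index;
			uint8_t  numa_enable;
		};
	};
};

int main(void)
{
	struct optimistic_spin_queue q = { .val = 0 };

	q.tail16 = 5;        /* CPU 4, encoded as cpu_nr + 1 */
	q.numa_enable = 1;   /* dynamic NUMA-aware mode engaged */

	/* masking with 0xffff recovers the tail whatever the flag bytes hold */
	printf("val=0x%08x tail=%u\n", (unsigned int)q.val,
	       (unsigned int)(q.val & 0xffff));
	return 0;
}
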
Signed-off-by: yongli-oc <yongli-oc@zhaoxin.com>
---
include/linux/osq_lock.h | 33 ++++++++++++++++++++-
kernel/locking/osq_lock.c | 62 +++++++++++++++++++++++++++++++++++++--
2 files changed, 91 insertions(+), 4 deletions(-)

diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index ea8fb31379e3..37a7bc4ab530 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -12,14 +12,42 @@ struct optimistic_spin_queue {
* Stores an encoded value of the CPU # of the tail node in the queue.
* If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
*/
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+ union {
+ atomic_t tail;
+ u32 val;
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u16 tail16;
+ u8 index;
+ u8 numa_enable;
+ };
+#else
+ struct {
+ u8 numa_enable;
+ u8 index;
+ u16 tail16;
+ };
+#endif
+ };
+#else
atomic_t tail;
+#endif
};

#define OSQ_UNLOCKED_VAL (0)

/* Init macro and function. */
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+
+#define OSQ_LOCK_UNLOCKED { .tail = ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+#else
+
#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }

+#endif
+
static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
@@ -28,9 +56,12 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);

+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+extern bool osq_is_locked(struct optimistic_spin_queue *lock);
+#else
static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
{
return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
}
-
+#endif
#endif
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 75a6f6133866..a7b516939e00 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -2,7 +2,10 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
-
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+#include "numa.h"
+#include "numa_osq.h"
+#endif
/*
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
@@ -12,12 +15,34 @@
* spinning.
*/

+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+inline int encode_cpu(int cpu_nr)
+{
+ return cpu_nr + 1;
+}
+
+inline int node_cpu(struct optimistic_spin_node *node)
+{
+ return node->cpu - 1;
+}
+
+inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+ int cpu_nr = encoded_cpu_val - 1;
+
+ return per_cpu_ptr(&osq_node, cpu_nr);
+}
+#else
struct optimistic_spin_node {
struct optimistic_spin_node *next, *prev;
int locked; /* 1 if lock acquired */
int cpu; /* encoded CPU # + 1 value */
};
-
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
@@ -40,6 +65,7 @@ static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)

return per_cpu_ptr(&osq_node, cpu_nr);
}
+#endif

/*
* Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
@@ -97,6 +123,14 @@ bool osq_lock(struct optimistic_spin_queue *lock)
int curr = encode_cpu(smp_processor_id());
int old;

+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+ if (unlikely(enable_zx_numa_osq_lock > 1)) {
+ node->numa = 1;
+ return x_osq_lock(lock);
+ }
+ node->numa = 0;
+#endif
+
node->locked = 0;
node->next = NULL;
node->cpu = curr;
@@ -108,6 +142,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* the lock tail.
*/
old = atomic_xchg(&lock->tail, curr);
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+ /* enabled implies all encoded CPU numbers fit in 16 bits (< 65534) */
+ if (enable_zx_numa_osq_lock > 0)
+ old = old & 0xffff;
+#endif
if (old == OSQ_UNLOCKED_VAL)
return true;

@@ -212,6 +251,14 @@ void osq_unlock(struct optimistic_spin_queue *lock)
struct optimistic_spin_node *node, *next;
int curr = encode_cpu(smp_processor_id());

+ node = this_cpu_ptr(&osq_node);
+
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+ if (unlikely(enable_zx_numa_osq_lock > 1 &&
+ node->numa == 1))
+ return x_osq_unlock(lock);
+#endif
+
/*
* Fast path for the uncontended case.
*/
@@ -222,7 +269,6 @@ void osq_unlock(struct optimistic_spin_queue *lock)
/*
* Second most likely case.
*/
- node = this_cpu_ptr(&osq_node);
next = xchg(&node->next, NULL);
if (next) {
WRITE_ONCE(next->locked, 1);
@@ -233,3 +279,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
if (next)
WRITE_ONCE(next->locked, 1);
}
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
+bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+ if (unlikely(enable_zx_numa_osq_lock > 1))
+ return x_osq_is_locked(lock);
+ return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+#endif
+
+
--
2.34.1
On 9/14/24 04:53, yongli-oc wrote:
[...]
> +#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
> +#include "numa.h"
> +#include "numa_osq.h"
> +#endif

These header files are defined in patch 3. You need to rethink about
patch ordering in order not to break bisection.
On 2024/9/15 00:06, Waiman Long wrote:
> On 9/14/24 04:53, yongli-oc wrote:
> [...]
>> +#ifdef CONFIG_LOCK_SPIN_ON_OWNER_NUMA
>> +#include "numa.h"
>> +#include "numa_osq.h"
>> +#endif
>
> These header files are defined in patch 3. You need to rethink about
> patch ordering in order not to break bisection.

I will move them to patch 2.