Most Linux-kernel uses of locking are straightforward, but there are
corner-case uses that rely on less well-known aspects of the lock and
unlock primitives. This commit therefore adds a locking.txt and litmus
tests in Documentation/litmus-tests/locking to explain these corner-case
uses.
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
.../litmus-tests/locking/DCL-broken.litmus | 55 +++
.../litmus-tests/locking/DCL-fixed.litmus | 56 +++
.../litmus-tests/locking/RM-broken.litmus | 42 +++
.../litmus-tests/locking/RM-fixed.litmus | 42 +++
tools/memory-model/Documentation/locking.txt | 320 ++++++++++++++++++
5 files changed, 515 insertions(+)
create mode 100644 Documentation/litmus-tests/locking/DCL-broken.litmus
create mode 100644 Documentation/litmus-tests/locking/DCL-fixed.litmus
create mode 100644 Documentation/litmus-tests/locking/RM-broken.litmus
create mode 100644 Documentation/litmus-tests/locking/RM-fixed.litmus
create mode 100644 tools/memory-model/Documentation/locking.txt
diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
new file mode 100644
index 000000000000..cfaa25ff82b1
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
@@ -0,0 +1,55 @@
+C DCL-broken
+
+(*
+ * Result: Sometimes
+ *
+ * This litmus test demonstrates that more than just locking is required
+ * to correctly implement double-checked locking.
+ *)
+
+{
+ int flag;
+ int data;
+ int lck;
+}
+
+P0(int *flag, int *data, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ WRITE_ONCE(*flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ WRITE_ONCE(*flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
new file mode 100644
index 000000000000..579d6c246f16
--- /dev/null
+++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
@@ -0,0 +1,56 @@
+C DCL-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that double-checked locking can be
+ * reliable given proper use of smp_load_acquire() and smp_store_release()
+ * in addition to the locking.
+ *)
+
+{
+ int flag;
+ int data;
+ int lck;
+}
+
+P0(int *flag, int *data, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = smp_load_acquire(flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ smp_store_release(flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+P1(int *flag, int *data, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = smp_load_acquire(flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ smp_store_release(flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+}
+
+locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
+exists (0:r2=0 \/ 1:r2=0)
diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
new file mode 100644
index 000000000000..c586ae4b547d
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-broken.litmus
@@ -0,0 +1,42 @@
+C RM-broken
+
+(*
+ * Result: DEADLOCK
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+ int lck;
+ int x;
+ int y;
+}
+
+P0(int *x, int *y, int *lck)
+{
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+}
+
+P1(int *x, int *y, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ spin_lock(lck);
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+}
+
+locations [x;lck;0:r2;1:r0;1:r1;1:r2]
+filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
new file mode 100644
index 000000000000..672856736b42
--- /dev/null
+++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
@@ -0,0 +1,42 @@
+C RM-fixed
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that the old "roach motel" approach
+ * to locking, where code can be freely moved into critical sections,
+ * cannot be used in the Linux kernel.
+ *)
+
+{
+ int lck;
+ int x;
+ int y;
+}
+
+P0(int *x, int *y, int *lck)
+{
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+}
+
+P1(int *x, int *y, int *lck)
+{
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+}
+
+locations [x;lck;0:r2;1:r0;1:r1;1:r2]
+filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+exists (1:r2=1)
diff --git a/tools/memory-model/Documentation/locking.txt b/tools/memory-model/Documentation/locking.txt
new file mode 100644
index 000000000000..4e05c6d53ab7
--- /dev/null
+++ b/tools/memory-model/Documentation/locking.txt
@@ -0,0 +1,320 @@
+Locking
+=======
+
+Locking is well-known and the common use cases are straightforward: Any
+CPU holding a given lock sees any changes previously seen or made by any
+CPU before it previously released that same lock. This last sentence
+is the only part of this document that most developers will need to read.
+
+However, developers who would like to also access lock-protected shared
+variables outside of their corresponding locks should continue reading.
+
+
+Locking and Prior Accesses
+--------------------------
+
+The basic rule of locking is worth repeating:
+
+ Any CPU holding a given lock sees any changes previously seen
+ or made by any CPU before it previously released that same lock.
+
+Note that this statement is a bit stronger than "Any CPU holding a
+given lock sees all changes made by any CPU during the time that CPU was
+previously holding this same lock". For example, consider the following
+pair of code fragments:
+
+ /* See MP+polocks.litmus. */
+ void CPU0(void)
+ {
+ WRITE_ONCE(x, 1);
+ spin_lock(&mylock);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ r0 = READ_ONCE(y);
+ spin_unlock(&mylock);
+ r1 = READ_ONCE(x);
+ }
+
+The basic rule guarantees that if CPU0() acquires mylock before CPU1(),
+then both r0 and r1 must be set to the value 1. This also has the
+consequence that if the final value of r0 is equal to 1, then the final
+value of r1 must also be equal to 1. In contrast, the weaker rule would
+say nothing about the final value of r1.
+
+
+Locking and Subsequent Accesses
+-------------------------------
+
+The converse to the basic rule also holds: Any CPU holding a given
+lock will not see any changes that will be made by any CPU after it
+subsequently acquires this same lock. This converse statement is
+illustrated by the following litmus test:
+
+ /* See MP+porevlocks.litmus. */
+ void CPU0(void)
+ {
+ r0 = READ_ONCE(y);
+ spin_lock(&mylock);
+ r1 = READ_ONCE(x);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&mylock);
+ WRITE_ONCE(y, 1);
+ }
+
+This converse to the basic rule guarantees that if CPU0() acquires
+mylock before CPU1(), then both r0 and r1 must be set to the value 0.
+This also has the consequence that if the final value of r1 is equal
+to 0, then the final value of r0 must also be equal to 0. In contrast,
+the weaker rule would say nothing about the final value of r0.
+
+These examples show only a single pair of CPUs, but the effects of the
+locking basic rule extend across multiple acquisitions of a given lock
+across multiple CPUs.
+
+
+Double-Checked Locking
+----------------------
+
+It is well known that more than just a lock is required to make
+double-checked locking work correctly.  This litmus test illustrates
+one incorrect approach:
+
+ /* See Documentation/litmus-tests/locking/DCL-broken.litmus. */
+ P0(int *flag, int *data, int *lck)
+ {
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ WRITE_ONCE(*flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+ }
+ /* P1() is exactly the same as P0(). */
+
+There are two problems. First, there is no ordering between the first
+READ_ONCE() of "flag" and the READ_ONCE() of "data". Second, there is
+no ordering between the two WRITE_ONCE() calls. It should therefore be
+no surprise that "r2" can be zero, and a quick herd7 run confirms this.
+
+One way to fix this is to use smp_load_acquire() and smp_store_release()
+as shown in this corrected version:
+
+ /* See Documentation/litmus-tests/locking/DCL-fixed.litmus. */
+ P0(int *flag, int *data, int *lck)
+ {
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = smp_load_acquire(flag);
+ if (r0 == 0) {
+ spin_lock(lck);
+ r1 = READ_ONCE(*flag);
+ if (r1 == 0) {
+ WRITE_ONCE(*data, 1);
+ smp_store_release(flag, 1);
+ }
+ spin_unlock(lck);
+ }
+ r2 = READ_ONCE(*data);
+ }
+ /* P1() is exactly the same as P0(). */
+
+The smp_load_acquire() guarantees that its load from "flag" will
+be ordered before the READ_ONCE() from data, thus solving the first
+problem. The smp_store_release() guarantees that its store will be
+ordered after the WRITE_ONCE() to "data", solving the second problem.
+The smp_store_release() pairs with the smp_load_acquire(), thus ensuring
+that the ordering provided by each actually takes effect. Again, a
+quick herd7 run confirms this.
+
+In short, if you access a lock-protected variable without holding the
+corresponding lock, you will need to provide additional ordering, in
+this case, via the smp_load_acquire() and the smp_store_release().
+
+
+Ordering Provided by a Lock to CPUs Not Holding That Lock
+---------------------------------------------------------
+
+It is not necessarily the case that accesses ordered by locking will be
+seen as ordered by CPUs not holding that lock. Consider this example:
+
+ /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
+ void CPU0(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ r0 = READ_ONCE(y);
+ WRITE_ONCE(z, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU2(void)
+ {
+ WRITE_ONCE(z, 2);
+ smp_mb();
+ r1 = READ_ONCE(x);
+ }
+
+Counter-intuitive though it might be, it is quite possible to have
+the final value of r0 be 1, the final value of z be 2, and the final
+value of r1 be 0. The reason for this surprising outcome is that CPU2()
+never acquired the lock, and thus did not fully benefit from the lock's
+ordering properties.
+
+Ordering can be extended to CPUs not holding the lock by careful use
+of smp_mb__after_spinlock():
+
+ /* See Z6.0+pooncelock+poonceLock+pombonce.litmus. */
+ void CPU0(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ smp_mb__after_spinlock();
+ r0 = READ_ONCE(y);
+ WRITE_ONCE(z, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU2(void)
+ {
+ WRITE_ONCE(z, 2);
+ smp_mb();
+ r1 = READ_ONCE(x);
+ }
+
+This addition of smp_mb__after_spinlock() strengthens the lock
+acquisition sufficiently to rule out the counter-intuitive outcome.
+In other words, the addition of the smp_mb__after_spinlock() prohibits
+the counter-intuitive result where the final value of r0 is 1, the final
+value of z is 2, and the final value of r1 is 0.
+
+
+No Roach-Motel Locking!
+-----------------------
+
+This example requires familiarity with the herd7 "filter" clause, so
+please read up on that topic in litmus-tests.txt.
+
+It is tempting to allow memory-reference instructions to be pulled
+into a critical section, but this cannot be allowed in the general case.
+For example, consider a spin loop preceding a lock-based critical section.
+Now, herd7 does not model spin loops, but we can emulate one with two
+loads, with a "filter" clause to constrain the first to return the
+initial value and the second to return the updated value, as shown below:
+
+ /* See Documentation/litmus-tests/locking/RM-fixed.litmus. */
+ P0(int *x, int *y, int *lck)
+ {
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+ }
+
+ P1(int *x, int *y, int *lck)
+ {
+ int r0;
+ int r1;
+ int r2;
+
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+ }
+
+ filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+ exists (1:r2=1)
+
+The variable "x" is the control variable for the emulated spin loop.
+P0() sets it to "1" while holding the lock, and P1() emulates the
+spin loop by reading it twice, first into "1:r0" (which should get the
+initial value "0") and then into "1:r1" (which should get the updated
+value "1").
+
+The purpose of the variable "y" is to reject deadlocked executions.
+Only those executions where the final value of "y" is 2 have avoided deadlock.
+
+The "filter" clause takes all this into account, constraining "y" to
+equal "2", "1:r0" to equal "0", and "1:r1" to equal 1.
+
+Then the "exists" clause checks to see if P1() acquired its lock first,
+which should not happen given the filter clause because P0() updates
+"x" while holding the lock. And herd7 confirms this.
+
+But suppose that the compiler was permitted to reorder the spin loop
+into P1()'s critical section, like this:
+
+ /* See Documentation/litmus-tests/locking/RM-broken.litmus. */
+ P0(int *x, int *y, int *lck)
+ {
+ int r2;
+
+ spin_lock(lck);
+ r2 = atomic_inc_return(y);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(lck);
+ }
+
+ P1(int *x, int *y, int *lck)
+ {
+ int r0;
+ int r1;
+ int r2;
+
+ spin_lock(lck);
+ r0 = READ_ONCE(*x);
+ r1 = READ_ONCE(*x);
+ r2 = atomic_inc_return(y);
+ spin_unlock(lck);
+ }
+
+ locations [x;lck;0:r2;1:r0;1:r1;1:r2]
+ filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+ exists (1:r2=1)
+
+If "1:r0" is equal to "0", "1:r1" can never equal "1" because P0()
+cannot update "x" while P1() holds the lock. And herd7 confirms this,
+showing zero executions matching the "filter" criteria.
+
+And this is why Linux-kernel lock and unlock primitives must prevent
+code from entering critical sections. It is not sufficient to only
+prevent code from leaving them.
--
2.40.0.rc2
Hi Paul,
On Mon, 20 Mar 2023 18:05:19 -0700, Paul E. McKenney wrote:
> Most Linux-kernel uses of locking are straightforward, but there are
> corner-case uses that rely on less well-known aspects of the lock and
> unlock primitives. This commit therefore adds a locking.txt and litmus
> tests in Documentation/litmus-tests/locking to explain these corner-case
> uses.
>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---
> .../litmus-tests/locking/DCL-broken.litmus | 55 +++
> .../litmus-tests/locking/DCL-fixed.litmus | 56 +++
> .../litmus-tests/locking/RM-broken.litmus | 42 +++
> .../litmus-tests/locking/RM-fixed.litmus | 42 +++
> tools/memory-model/Documentation/locking.txt | 320 ++++++++++++++++++
I think the documentation needs adjustment to cope with Andrea's change
of litmus tests.
Also, coding style of code snippets taken from litmus tests look somewhat
inconsistent with other snippets taken from MP+... litmus tests:
- Simple function signature such as "void CPU0(void)".
- No declaration of local variables.
- Indirection level of global variables.
- No "locations" clause
How about applying the diff below?
Thanks, Akira
-----
diff --git a/tools/memory-model/Documentation/locking.txt b/tools/memory-model/Documentation/locking.txt
index 4e05c6d53ab7..65c898c64a93 100644
--- a/tools/memory-model/Documentation/locking.txt
+++ b/tools/memory-model/Documentation/locking.txt
@@ -91,25 +91,21 @@ double-checked locking work correctly, This litmus test illustrates
one incorrect approach:
/* See Documentation/litmus-tests/locking/DCL-broken.litmus. */
- P0(int *flag, int *data, int *lck)
+ void CPU0(void)
{
- int r0;
- int r1;
- int r2;
-
- r0 = READ_ONCE(*flag);
+ r0 = READ_ONCE(flag);
if (r0 == 0) {
- spin_lock(lck);
- r1 = READ_ONCE(*flag);
+ spin_lock(&lck);
+ r1 = READ_ONCE(flag);
if (r1 == 0) {
- WRITE_ONCE(*data, 1);
- WRITE_ONCE(*flag, 1);
+ WRITE_ONCE(data, 1);
+ WRITE_ONCE(flag, 1);
}
- spin_unlock(lck);
+ spin_unlock(&lck);
}
- r2 = READ_ONCE(*data);
+ r2 = READ_ONCE(data);
}
- /* P1() is the exactly the same as P0(). */
+ /* CPU1() is the exactly the same as CPU0(). */
There are two problems. First, there is no ordering between the first
READ_ONCE() of "flag" and the READ_ONCE() of "data". Second, there is
@@ -120,25 +116,21 @@ One way to fix this is to use smp_load_acquire() and smp_store_release()
as shown in this corrected version:
/* See Documentation/litmus-tests/locking/DCL-fixed.litmus. */
- P0(int *flag, int *data, int *lck)
+ void CPU0(void)
{
- int r0;
- int r1;
- int r2;
-
- r0 = smp_load_acquire(flag);
+ r0 = smp_load_acquire(&flag);
if (r0 == 0) {
- spin_lock(lck);
- r1 = READ_ONCE(*flag);
+ spin_lock(&lck);
+ r1 = READ_ONCE(flag);
if (r1 == 0) {
- WRITE_ONCE(*data, 1);
- smp_store_release(flag, 1);
+ WRITE_ONCE(data, 1);
+ smp_store_release(&flag, 1);
}
- spin_unlock(lck);
+ spin_unlock(&lck);
}
- r2 = READ_ONCE(*data);
+ r2 = READ_ONCE(data);
}
- /* P1() is the exactly the same as P0(). */
+ /* CPU1() is the exactly the same as CPU0(). */
The smp_load_acquire() guarantees that its load from "flags" will
be ordered before the READ_ONCE() from data, thus solving the first
@@ -238,81 +230,67 @@ loads, with a "filter" clause to constrain the first to return the
initial value and the second to return the updated value, as shown below:
/* See Documentation/litmus-tests/locking/RM-fixed.litmus. */
- P0(int *x, int *y, int *lck)
+ void CPU0(void)
{
- int r2;
-
- spin_lock(lck);
- r2 = atomic_inc_return(y);
- WRITE_ONCE(*x, 1);
- spin_unlock(lck);
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&lck);
}
- P1(int *x, int *y, int *lck)
+ void CPU1(void)
{
- int r0;
- int r1;
- int r2;
-
- r0 = READ_ONCE(*x);
- r1 = READ_ONCE(*x);
- spin_lock(lck);
- r2 = atomic_inc_return(y);
- spin_unlock(lck);
+ r0 = READ_ONCE(x);
+ r1 = READ_ONCE(x);
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ spin_unlock(&lck);
}
- filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+ filter (1:r0=0 /\ 1:r1=1)
exists (1:r2=1)
The variable "x" is the control variable for the emulated spin loop.
-P0() sets it to "1" while holding the lock, and P1() emulates the
+CPU0() sets it to "1" while holding the lock, and CPU1() emulates the
spin loop by reading it twice, first into "1:r0" (which should get the
initial value "0") and then into "1:r1" (which should get the updated
value "1").
-The purpose of the variable "y" is to reject deadlocked executions.
-Only those executions where the final value of "y" have avoided deadlock.
+The "filter" clause takes this into account, constraining "1:r0" to
+equal "0" and "1:r1" to equal 1.
-The "filter" clause takes all this into account, constraining "y" to
-equal "2", "1:r0" to equal "0", and "1:r1" to equal 1.
-
-Then the "exists" clause checks to see if P1() acquired its lock first,
-which should not happen given the filter clause because P0() updates
+Then the "exists" clause checks to see if CPU1() acquired its lock first,
+which should not happen given the filter clause because CPU0() updates
"x" while holding the lock. And herd7 confirms this.
But suppose that the compiler was permitted to reorder the spin loop
-into P1()'s critical section, like this:
+into CPU1()'s critical section, like this:
/* See Documentation/litmus-tests/locking/RM-broken.litmus. */
- P0(int *x, int *y, int *lck)
+ void CPU0(void)
{
int r2;
- spin_lock(lck);
- r2 = atomic_inc_return(y);
- WRITE_ONCE(*x, 1);
- spin_unlock(lck);
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&lck);
}
- P1(int *x, int *y, int *lck)
+ void CPU1(void)
{
- int r0;
- int r1;
- int r2;
-
- spin_lock(lck);
- r0 = READ_ONCE(*x);
- r1 = READ_ONCE(*x);
- r2 = atomic_inc_return(y);
- spin_unlock(lck);
+ spin_lock(&lck);
+ r0 = READ_ONCE(x);
+ r1 = READ_ONCE(x);
+ r2 = atomic_inc_return(&y);
+ spin_unlock(&lck);
}
- locations [x;lck;0:r2;1:r0;1:r1;1:r2]
- filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+ filter (1:r0=0 /\ 1:r1=1)
exists (1:r2=1)
-If "1:r0" is equal to "0", "1:r1" can never equal "1" because P0()
-cannot update "x" while P1() holds the lock. And herd7 confirms this,
+If "1:r0" is equal to "0", "1:r1" can never equal "1" because CPU0()
+cannot update "x" while CPU1() holds the lock. And herd7 confirms this,
showing zero executions matching the "filter" criteria.
And this is why Linux-kernel lock and unlock primitives must prevent
On Thu, Mar 23, 2023 at 11:52:57AM +0900, Akira Yokosawa wrote:
> Hi Paul,
>
> On Mon, 20 Mar 2023 18:05:19 -0700, Paul E. McKenney wrote:
> > Most Linux-kernel uses of locking are straightforward, but there are
> > corner-case uses that rely on less well-known aspects of the lock and
> > unlock primitives. This commit therefore adds a locking.txt and litmus
> > tests in Documentation/litmus-tests/locking to explain these corner-case
> > uses.
> >
> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> > ---
> > .../litmus-tests/locking/DCL-broken.litmus | 55 +++
> > .../litmus-tests/locking/DCL-fixed.litmus | 56 +++
> > .../litmus-tests/locking/RM-broken.litmus | 42 +++
> > .../litmus-tests/locking/RM-fixed.litmus | 42 +++
> > tools/memory-model/Documentation/locking.txt | 320 ++++++++++++++++++
>
> I think the documentation needs adjustment to cope with Andrea's change
> of litmus tests.
>
> Also, coding style of code snippets taken from litmus tests look somewhat
> inconsistent with other snippets taken from MP+... litmus tests:
>
> - Simple function signature such as "void CPU0(void)".
> - No declaration of local variables.
> - Indirection level of global variables.
> - No "locations" clause
>
> How about applying the diff below?
Good eyes, thank you! I will fold this in with attribution.
Thanx, Paul
> Thanks, Akira
>
> -----
> diff --git a/tools/memory-model/Documentation/locking.txt b/tools/memory-model/Documentation/locking.txt
> index 4e05c6d53ab7..65c898c64a93 100644
> --- a/tools/memory-model/Documentation/locking.txt
> +++ b/tools/memory-model/Documentation/locking.txt
> @@ -91,25 +91,21 @@ double-checked locking work correctly, This litmus test illustrates
> one incorrect approach:
>
> /* See Documentation/litmus-tests/locking/DCL-broken.litmus. */
> - P0(int *flag, int *data, int *lck)
> + void CPU0(void)
> {
> - int r0;
> - int r1;
> - int r2;
> -
> - r0 = READ_ONCE(*flag);
> + r0 = READ_ONCE(flag);
> if (r0 == 0) {
> - spin_lock(lck);
> - r1 = READ_ONCE(*flag);
> + spin_lock(&lck);
> + r1 = READ_ONCE(flag);
> if (r1 == 0) {
> - WRITE_ONCE(*data, 1);
> - WRITE_ONCE(*flag, 1);
> + WRITE_ONCE(data, 1);
> + WRITE_ONCE(flag, 1);
> }
> - spin_unlock(lck);
> + spin_unlock(&lck);
> }
> - r2 = READ_ONCE(*data);
> + r2 = READ_ONCE(data);
> }
> - /* P1() is the exactly the same as P0(). */
> + /* CPU1() is the exactly the same as CPU0(). */
>
> There are two problems. First, there is no ordering between the first
> READ_ONCE() of "flag" and the READ_ONCE() of "data". Second, there is
> @@ -120,25 +116,21 @@ One way to fix this is to use smp_load_acquire() and smp_store_release()
> as shown in this corrected version:
>
> /* See Documentation/litmus-tests/locking/DCL-fixed.litmus. */
> - P0(int *flag, int *data, int *lck)
> + void CPU0(void)
> {
> - int r0;
> - int r1;
> - int r2;
> -
> - r0 = smp_load_acquire(flag);
> + r0 = smp_load_acquire(&flag);
> if (r0 == 0) {
> - spin_lock(lck);
> - r1 = READ_ONCE(*flag);
> + spin_lock(&lck);
> + r1 = READ_ONCE(flag);
> if (r1 == 0) {
> - WRITE_ONCE(*data, 1);
> - smp_store_release(flag, 1);
> + WRITE_ONCE(data, 1);
> + smp_store_release(&flag, 1);
> }
> - spin_unlock(lck);
> + spin_unlock(&lck);
> }
> - r2 = READ_ONCE(*data);
> + r2 = READ_ONCE(data);
> }
> - /* P1() is the exactly the same as P0(). */
> + /* CPU1() is the exactly the same as CPU0(). */
>
> The smp_load_acquire() guarantees that its load from "flags" will
> be ordered before the READ_ONCE() from data, thus solving the first
> @@ -238,81 +230,67 @@ loads, with a "filter" clause to constrain the first to return the
> initial value and the second to return the updated value, as shown below:
>
> /* See Documentation/litmus-tests/locking/RM-fixed.litmus. */
> - P0(int *x, int *y, int *lck)
> + void CPU0(void)
> {
> - int r2;
> -
> - spin_lock(lck);
> - r2 = atomic_inc_return(y);
> - WRITE_ONCE(*x, 1);
> - spin_unlock(lck);
> + spin_lock(&lck);
> + r2 = atomic_inc_return(&y);
> + WRITE_ONCE(x, 1);
> + spin_unlock(&lck);
> }
>
> - P1(int *x, int *y, int *lck)
> + void CPU1(void)
> {
> - int r0;
> - int r1;
> - int r2;
> -
> - r0 = READ_ONCE(*x);
> - r1 = READ_ONCE(*x);
> - spin_lock(lck);
> - r2 = atomic_inc_return(y);
> - spin_unlock(lck);
> + r0 = READ_ONCE(x);
> + r1 = READ_ONCE(x);
> + spin_lock(&lck);
> + r2 = atomic_inc_return(&y);
> + spin_unlock(&lck);
> }
>
> - filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
> + filter (1:r0=0 /\ 1:r1=1)
> exists (1:r2=1)
>
> The variable "x" is the control variable for the emulated spin loop.
> -P0() sets it to "1" while holding the lock, and P1() emulates the
> +CPU0() sets it to "1" while holding the lock, and CPU1() emulates the
> spin loop by reading it twice, first into "1:r0" (which should get the
> initial value "0") and then into "1:r1" (which should get the updated
> value "1").
>
> -The purpose of the variable "y" is to reject deadlocked executions.
> -Only those executions where the final value of "y" have avoided deadlock.
> +The "filter" clause takes this into account, constraining "1:r0" to
> +equal "0" and "1:r1" to equal 1.
>
> -The "filter" clause takes all this into account, constraining "y" to
> -equal "2", "1:r0" to equal "0", and "1:r1" to equal 1.
> -
> -Then the "exists" clause checks to see if P1() acquired its lock first,
> -which should not happen given the filter clause because P0() updates
> +Then the "exists" clause checks to see if CPU1() acquired its lock first,
> +which should not happen given the filter clause because CPU0() updates
> "x" while holding the lock. And herd7 confirms this.
>
> But suppose that the compiler was permitted to reorder the spin loop
> -into P1()'s critical section, like this:
> +into CPU1()'s critical section, like this:
>
> /* See Documentation/litmus-tests/locking/RM-broken.litmus. */
> - P0(int *x, int *y, int *lck)
> + void CPU0(void)
> {
> int r2;
>
> - spin_lock(lck);
> - r2 = atomic_inc_return(y);
> - WRITE_ONCE(*x, 1);
> - spin_unlock(lck);
> + spin_lock(&lck);
> + r2 = atomic_inc_return(&y);
> + WRITE_ONCE(x, 1);
> + spin_unlock(&lck);
> }
>
> - P1(int *x, int *y, int *lck)
> + void CPU1(void)
> {
> - int r0;
> - int r1;
> - int r2;
> -
> - spin_lock(lck);
> - r0 = READ_ONCE(*x);
> - r1 = READ_ONCE(*x);
> - r2 = atomic_inc_return(y);
> - spin_unlock(lck);
> + spin_lock(&lck);
> + r0 = READ_ONCE(x);
> + r1 = READ_ONCE(x);
> + r2 = atomic_inc_return(&y);
> + spin_unlock(&lck);
> }
>
> - locations [x;lck;0:r2;1:r0;1:r1;1:r2]
> - filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
> + filter (1:r0=0 /\ 1:r1=1)
> exists (1:r2=1)
>
> -If "1:r0" is equal to "0", "1:r1" can never equal "1" because P0()
> -cannot update "x" while P1() holds the lock. And herd7 confirms this,
> +If "1:r0" is equal to "0", "1:r1" can never equal "1" because CPU0()
> +cannot update "x" while CPU1() holds the lock. And herd7 confirms this,
> showing zero executions matching the "filter" criteria.
>
> And this is why Linux-kernel lock and unlock primitives must prevent
>
>
>
On Thu, 23 Mar 2023 11:52:15 -0700, Paul E. McKenney wrote:
> On Thu, Mar 23, 2023 at 11:52:57AM +0900, Akira Yokosawa wrote:
>> Hi Paul,
>>
>> On Mon, 20 Mar 2023 18:05:19 -0700, Paul E. McKenney wrote:
>>> Most Linux-kernel uses of locking are straightforward, but there are
>>> corner-case uses that rely on less well-known aspects of the lock and
>>> unlock primitives. This commit therefore adds a locking.txt and litmus
>>> tests in Documentation/litmus-tests/locking to explain these corner-case
>>> uses.
>>>
>>> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
>>> ---
>>> .../litmus-tests/locking/DCL-broken.litmus | 55 +++
>>> .../litmus-tests/locking/DCL-fixed.litmus | 56 +++
>>> .../litmus-tests/locking/RM-broken.litmus | 42 +++
>>> .../litmus-tests/locking/RM-fixed.litmus | 42 +++
>>> tools/memory-model/Documentation/locking.txt | 320 ++++++++++++++++++
>>
>> I think the documentation needs adjustment to cope with Andrea's change
>> of litmus tests.
>>
>> Also, coding style of code snippets taken from litmus tests look somewhat
>> inconsistent with other snippets taken from MP+... litmus tests:
>>
>> - Simple function signature such as "void CPU0(void)".
>> - No declaration of local variables.
>> - Indirection level of global variables.
>> - No "locations" clause
>>
>> How about applying the diff below?
>
> Good eyes, thank you! I will fold this in with attribution.
Feel free to add
Reviewed-by: Akira Yokosawa <akiyks@gmail.com>
Thanks, Akira
>
> Thanx, Paul
>
>> Thanks, Akira
On Fri, Mar 24, 2023 at 08:30:38AM +0900, Akira Yokosawa wrote: > On Thu, 23 Mar 2023 11:52:15 -0700, Paul E. McKenney wrote: > > On Thu, Mar 23, 2023 at 11:52:57AM +0900, Akira Yokosawa wrote: > >> Hi Paul, > >> > >> On Mon, 20 Mar 2023 18:05:19 -0700, Paul E. McKenney wrote: > >>> Most Linux-kernel uses of locking are straightforward, but there are > >>> corner-case uses that rely on less well-known aspects of the lock and > >>> unlock primitives. This commit therefore adds a locking.txt and litmus > >>> tests in Documentation/litmus-tests/locking to explain these corner-case > >>> uses. > >>> > >>> Signed-off-by: Paul E. McKenney <paulmck@kernel.org> > >>> --- > >>> .../litmus-tests/locking/DCL-broken.litmus | 55 +++ > >>> .../litmus-tests/locking/DCL-fixed.litmus | 56 +++ > >>> .../litmus-tests/locking/RM-broken.litmus | 42 +++ > >>> .../litmus-tests/locking/RM-fixed.litmus | 42 +++ > >>> tools/memory-model/Documentation/locking.txt | 320 ++++++++++++++++++ > >> > >> I think the documentation needs adjustment to cope with Andrea's change > >> of litmus tests. > >> > >> Also, coding style of code snippets taken from litmus tests look somewhat > >> inconsistent with other snippets taken from MP+... litmus tests: > >> > >> - Simple function signature such as "void CPU0(void)". > >> - No declaration of local variables. > >> - Indirection level of global variables. > >> - No "locations" clause > >> > >> How about applying the diff below? > > > > Good eyes, thank you! I will fold this in with attribution. > > Feel free to add > > Reviewed-by: Akira Yokosawa <akiyks@gmail.com> Thank you, I will apply on my next full rebase. Thanx, Paul
> create mode 100644 Documentation/litmus-tests/locking/DCL-broken.litmus
> create mode 100644 Documentation/litmus-tests/locking/DCL-fixed.litmus
> create mode 100644 Documentation/litmus-tests/locking/RM-broken.litmus
> create mode 100644 Documentation/litmus-tests/locking/RM-fixed.litmus
Unfortunately none of them were liked by klitmus7/gcc, the diff below
works for me but please double check.
Andrea
diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
index cfaa25ff82b1e..bfb7ba4316d69 100644
--- a/Documentation/litmus-tests/locking/DCL-broken.litmus
+++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
@@ -10,10 +10,9 @@ C DCL-broken
{
int flag;
int data;
- int lck;
}
-P0(int *flag, int *data, int *lck)
+P0(int *flag, int *data, spinlock_t *lck)
{
int r0;
int r1;
@@ -32,7 +31,7 @@ P0(int *flag, int *data, int *lck)
r2 = READ_ONCE(*data);
}
-P1(int *flag, int *data, int *lck)
+P1(int *flag, int *data, spinlock_t *lck)
{
int r0;
int r1;
@@ -51,5 +50,5 @@ P1(int *flag, int *data, int *lck)
r2 = READ_ONCE(*data);
}
-locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
exists (0:r2=0 \/ 1:r2=0)
diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
index 579d6c246f167..d1b60bcb0c8f3 100644
--- a/Documentation/litmus-tests/locking/DCL-fixed.litmus
+++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
@@ -11,10 +11,9 @@ C DCL-fixed
{
int flag;
int data;
- int lck;
}
-P0(int *flag, int *data, int *lck)
+P0(int *flag, int *data, spinlock_t *lck)
{
int r0;
int r1;
@@ -33,7 +32,7 @@ P0(int *flag, int *data, int *lck)
r2 = READ_ONCE(*data);
}
-P1(int *flag, int *data, int *lck)
+P1(int *flag, int *data, spinlock_t *lck)
{
int r0;
int r1;
@@ -52,5 +51,5 @@ P1(int *flag, int *data, int *lck)
r2 = READ_ONCE(*data);
}
-locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
+locations [flag;data;0:r0;0:r1;1:r0;1:r1]
exists (0:r2=0 \/ 1:r2=0)
diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
index c586ae4b547de..b7ef30cedfe51 100644
--- a/Documentation/litmus-tests/locking/RM-broken.litmus
+++ b/Documentation/litmus-tests/locking/RM-broken.litmus
@@ -9,12 +9,11 @@ C RM-broken
*)
{
- int lck;
int x;
- int y;
+ atomic_t y;
}
-P0(int *x, int *y, int *lck)
+P0(int *x, atomic_t *y, spinlock_t *lck)
{
int r2;
@@ -24,7 +23,7 @@ P0(int *x, int *y, int *lck)
spin_unlock(lck);
}
-P1(int *x, int *y, int *lck)
+P1(int *x, atomic_t *y, spinlock_t *lck)
{
int r0;
int r1;
@@ -37,6 +36,6 @@ P1(int *x, int *y, int *lck)
spin_unlock(lck);
}
-locations [x;lck;0:r2;1:r0;1:r1;1:r2]
-filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
exists (1:r2=1)
diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
index 672856736b42e..b628175596160 100644
--- a/Documentation/litmus-tests/locking/RM-fixed.litmus
+++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
@@ -9,12 +9,11 @@ C RM-fixed
*)
{
- int lck;
int x;
- int y;
+ atomic_t y;
}
-P0(int *x, int *y, int *lck)
+P0(int *x, atomic_t *y, spinlock_t *lck)
{
int r2;
@@ -24,7 +23,7 @@ P0(int *x, int *y, int *lck)
spin_unlock(lck);
}
-P1(int *x, int *y, int *lck)
+P1(int *x, atomic_t *y, spinlock_t *lck)
{
int r0;
int r1;
@@ -37,6 +36,6 @@ P1(int *x, int *y, int *lck)
spin_unlock(lck);
}
-locations [x;lck;0:r2;1:r0;1:r1;1:r2]
-filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
+locations [x;0:r2;1:r0;1:r1;1:r2]
+filter (1:r0=0 /\ 1:r1=1)
exists (1:r2=1)
On Wed, Mar 22, 2023 at 09:59:38AM +0100, Andrea Parri wrote:
> > create mode 100644 Documentation/litmus-tests/locking/DCL-broken.litmus
> > create mode 100644 Documentation/litmus-tests/locking/DCL-fixed.litmus
> > create mode 100644 Documentation/litmus-tests/locking/RM-broken.litmus
> > create mode 100644 Documentation/litmus-tests/locking/RM-fixed.litmus
>
> Unfortunately none of them were liked by klitmus7/gcc, the diff below
> works for me but please double check.
Applied with attribution, thank you!
I was surprised by the need to change the "locations" clauses, but
applied that change anyway. Ah, I take it that klitmus prints that,
but doesn't know how to print out a spinlock_t?
Dropping "y" from the "filter" clause also gave me pause, but I eventually
convinced myself that it was OK. But it would be good for others to
also take a close look.
Thanx, Paul
> Andrea
>
>
> diff --git a/Documentation/litmus-tests/locking/DCL-broken.litmus b/Documentation/litmus-tests/locking/DCL-broken.litmus
> index cfaa25ff82b1e..bfb7ba4316d69 100644
> --- a/Documentation/litmus-tests/locking/DCL-broken.litmus
> +++ b/Documentation/litmus-tests/locking/DCL-broken.litmus
> @@ -10,10 +10,9 @@ C DCL-broken
> {
> int flag;
> int data;
> - int lck;
> }
>
> -P0(int *flag, int *data, int *lck)
> +P0(int *flag, int *data, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -32,7 +31,7 @@ P0(int *flag, int *data, int *lck)
> r2 = READ_ONCE(*data);
> }
>
> -P1(int *flag, int *data, int *lck)
> +P1(int *flag, int *data, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -51,5 +50,5 @@ P1(int *flag, int *data, int *lck)
> r2 = READ_ONCE(*data);
> }
>
> -locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
> +locations [flag;data;0:r0;0:r1;1:r0;1:r1]
> exists (0:r2=0 \/ 1:r2=0)
> diff --git a/Documentation/litmus-tests/locking/DCL-fixed.litmus b/Documentation/litmus-tests/locking/DCL-fixed.litmus
> index 579d6c246f167..d1b60bcb0c8f3 100644
> --- a/Documentation/litmus-tests/locking/DCL-fixed.litmus
> +++ b/Documentation/litmus-tests/locking/DCL-fixed.litmus
> @@ -11,10 +11,9 @@ C DCL-fixed
> {
> int flag;
> int data;
> - int lck;
> }
>
> -P0(int *flag, int *data, int *lck)
> +P0(int *flag, int *data, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -33,7 +32,7 @@ P0(int *flag, int *data, int *lck)
> r2 = READ_ONCE(*data);
> }
>
> -P1(int *flag, int *data, int *lck)
> +P1(int *flag, int *data, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -52,5 +51,5 @@ P1(int *flag, int *data, int *lck)
> r2 = READ_ONCE(*data);
> }
>
> -locations [flag;data;lck;0:r0;0:r1;1:r0;1:r1]
> +locations [flag;data;0:r0;0:r1;1:r0;1:r1]
> exists (0:r2=0 \/ 1:r2=0)
> diff --git a/Documentation/litmus-tests/locking/RM-broken.litmus b/Documentation/litmus-tests/locking/RM-broken.litmus
> index c586ae4b547de..b7ef30cedfe51 100644
> --- a/Documentation/litmus-tests/locking/RM-broken.litmus
> +++ b/Documentation/litmus-tests/locking/RM-broken.litmus
> @@ -9,12 +9,11 @@ C RM-broken
> *)
>
> {
> - int lck;
> int x;
> - int y;
> + atomic_t y;
> }
>
> -P0(int *x, int *y, int *lck)
> +P0(int *x, atomic_t *y, spinlock_t *lck)
> {
> int r2;
>
> @@ -24,7 +23,7 @@ P0(int *x, int *y, int *lck)
> spin_unlock(lck);
> }
>
> -P1(int *x, int *y, int *lck)
> +P1(int *x, atomic_t *y, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -37,6 +36,6 @@ P1(int *x, int *y, int *lck)
> spin_unlock(lck);
> }
>
> -locations [x;lck;0:r2;1:r0;1:r1;1:r2]
> -filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
> +locations [x;0:r2;1:r0;1:r1;1:r2]
> +filter (1:r0=0 /\ 1:r1=1)
> exists (1:r2=1)
> diff --git a/Documentation/litmus-tests/locking/RM-fixed.litmus b/Documentation/litmus-tests/locking/RM-fixed.litmus
> index 672856736b42e..b628175596160 100644
> --- a/Documentation/litmus-tests/locking/RM-fixed.litmus
> +++ b/Documentation/litmus-tests/locking/RM-fixed.litmus
> @@ -9,12 +9,11 @@ C RM-fixed
> *)
>
> {
> - int lck;
> int x;
> - int y;
> + atomic_t y;
> }
>
> -P0(int *x, int *y, int *lck)
> +P0(int *x, atomic_t *y, spinlock_t *lck)
> {
> int r2;
>
> @@ -24,7 +23,7 @@ P0(int *x, int *y, int *lck)
> spin_unlock(lck);
> }
>
> -P1(int *x, int *y, int *lck)
> +P1(int *x, atomic_t *y, spinlock_t *lck)
> {
> int r0;
> int r1;
> @@ -37,6 +36,6 @@ P1(int *x, int *y, int *lck)
> spin_unlock(lck);
> }
>
> -locations [x;lck;0:r2;1:r0;1:r1;1:r2]
> -filter (y=2 /\ 1:r0=0 /\ 1:r1=1)
> +locations [x;0:r2;1:r0;1:r1;1:r2]
> +filter (1:r0=0 /\ 1:r1=1)
> exists (1:r2=1)
> I was surprised by the need to change the "locations" clauses, but
> applied that change anyway.  Ah, I take it that klitmus prints that,
> but doesn't know how to print out a spinlock_t?

Yep, this aligns with my understanding.

  Andrea
© 2016 - 2026 Red Hat, Inc.