[PATCH v3 4/5] tools/memory-model: Switch to softcoded herd7 tags

Posted by Jonas Oberhauser 2 months, 1 week ago
A new version of Herd7 provides a -lkmmv1 switch which overrides the old herd7
behavior of simply ignoring any softcoded tags in the .def and .bell files. We
port LKMM to this version of Herd7 by providing the switch in linux-kernel.cfg
and reporting an error if the LKMM is used without this switch.

To preserve the semantics of LKMM, we also softcode the Noreturn tag on atomic
RMW operations that do not return a value, and define atomic_add_unless with
an Mb tag in linux-kernel.def.

We update herd-representation.txt accordingly and clarify some of the
resulting combinations.

We also add a litmus test for atomic_add_unless; this test uncovered a bug in
early iterations of the Herd7 patch that implements the new switch.

(To be) Signed-off-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>
Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
---
 .../Documentation/herd-representation.txt     | 27 ++++++++++---------
 tools/memory-model/linux-kernel.bell          |  3 +++
 tools/memory-model/linux-kernel.cfg           |  1 +
 tools/memory-model/linux-kernel.def           | 18 +++++++------
 .../litmus-tests/add-unless-mb.litmus         | 27 +++++++++++++++++++
 5 files changed, 56 insertions(+), 20 deletions(-)
 create mode 100644 tools/memory-model/litmus-tests/add-unless-mb.litmus

diff --git a/tools/memory-model/Documentation/herd-representation.txt b/tools/memory-model/Documentation/herd-representation.txt
index ed988906f2b7..7ae1ff3d3769 100644
--- a/tools/memory-model/Documentation/herd-representation.txt
+++ b/tools/memory-model/Documentation/herd-representation.txt
@@ -18,6 +18,11 @@
 #
 # By convention, a blank line in a cell means "same as the preceding line".
 #
+# Note that the syntactic representation does not always match the sets and
+# relations in linux-kernel.cat, due to redefinitions in linux-kernel.bell and
+# lock.cat. For example, the po link between LKR and LKW is upgraded to an rmw
+# link, and W[acquire] are not included in the Acquire set.
+#
 # Disclaimer.  The table includes representations of "add" and "and" operations;
 # corresponding/identical representations of "sub", "inc", "dec" and "or", "xor",
 # "andnot" operations are omitted.
@@ -60,14 +65,13 @@
     ------------------------------------------------------------------------------
     |       RMW ops w/o return value |                                           |
     ------------------------------------------------------------------------------
-    |                     atomic_add | R*[noreturn] ->rmw W*[once]               |
+    |                     atomic_add | R*[noreturn] ->rmw W*[noreturn]           |
     |                     atomic_and |                                           |
     |                      spin_lock | LKR ->po LKW                              |
     ------------------------------------------------------------------------------
     |        RMW ops w/ return value |                                           |
     ------------------------------------------------------------------------------
-    |              atomic_add_return | F[mb] ->po R*[once]                       |
-    |                                |     ->rmw W*[once] ->po F[mb]             |
+    |              atomic_add_return | R*[mb] ->rmw W*[mb]                       |
     |               atomic_fetch_add |                                           |
     |               atomic_fetch_and |                                           |
     |                    atomic_xchg |                                           |
@@ -79,13 +83,13 @@
     |            atomic_xchg_relaxed |                                           |
     |                   xchg_relaxed |                                           |
     |    atomic_add_negative_relaxed |                                           |
-    |      atomic_add_return_acquire | R*[acquire] ->rmw W*[once]                |
+    |      atomic_add_return_acquire | R*[acquire] ->rmw W*[acquire]             |
     |       atomic_fetch_add_acquire |                                           |
     |       atomic_fetch_and_acquire |                                           |
     |            atomic_xchg_acquire |                                           |
     |                   xchg_acquire |                                           |
     |    atomic_add_negative_acquire |                                           |
-    |      atomic_add_return_release | R*[once] ->rmw W*[release]                |
+    |      atomic_add_return_release | R*[release] ->rmw W*[release]             |
     |       atomic_fetch_add_release |                                           |
     |       atomic_fetch_and_release |                                           |
     |            atomic_xchg_release |                                           |
@@ -94,17 +98,16 @@
     ------------------------------------------------------------------------------
     |            Conditional RMW ops |                                           |
     ------------------------------------------------------------------------------
-    |                 atomic_cmpxchg | On success: F[mb] ->po R*[once]           |
-    |                                |                 ->rmw W*[once] ->po F[mb] |
-    |                                | On failure: R*[once]                      |
+    |                 atomic_cmpxchg | On success: R*[mb] ->rmw W*[mb]           |
+    |                                | On failure: R*[mb]                        |
     |                        cmpxchg |                                           |
     |              atomic_add_unless |                                           |
     |         atomic_cmpxchg_relaxed | On success: R*[once] ->rmw W*[once]       |
     |                                | On failure: R*[once]                      |
-    |         atomic_cmpxchg_acquire | On success: R*[acquire] ->rmw W*[once]    |
-    |                                | On failure: R*[once]                      |
-    |         atomic_cmpxchg_release | On success: R*[once] ->rmw W*[release]    |
-    |                                | On failure: R*[once]                      |
+    |         atomic_cmpxchg_acquire | On success: R*[acquire] ->rmw W*[acquire] |
+    |                                | On failure: R*[acquire]                   |
+    |         atomic_cmpxchg_release | On success: R*[release] ->rmw W*[release] |
+    |                                | On failure: R*[release]                   |
     |                   spin_trylock | On success: LKR ->po LKW                  |
     |                                | On failure: LF                            |
     ------------------------------------------------------------------------------
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 7c9ae48b9437..703028e5e091 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -94,3 +94,6 @@ let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
 let addr = carry-dep ; addr
 let ctrl = carry-dep ; ctrl
 let data = carry-dep ; data
+
+flag ~empty (if "lkmmv1" then 0 else _)
+  as this-model-requires-variant-higher-than-lkmmv0
diff --git a/tools/memory-model/linux-kernel.cfg b/tools/memory-model/linux-kernel.cfg
index 3c8098e99f41..a5855363259a 100644
--- a/tools/memory-model/linux-kernel.cfg
+++ b/tools/memory-model/linux-kernel.cfg
@@ -1,6 +1,7 @@
 macros linux-kernel.def
 bell linux-kernel.bell
 model linux-kernel.cat
+variant lkmmv1
 graph columns
 squished true
 showevents noregs
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index a12b96c547b7..4281572732bd 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -63,14 +63,14 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
 atomic_read_acquire(X) smp_load_acquire(X)
 atomic_set_release(X,V) { smp_store_release(X,V); }
 
-atomic_add(V,X) { __atomic_op(X,+,V); }
-atomic_sub(V,X) { __atomic_op(X,-,V); }
-atomic_and(V,X) { __atomic_op(X,&,V); }
-atomic_or(V,X)  { __atomic_op(X,|,V); }
-atomic_xor(V,X) { __atomic_op(X,^,V); }
-atomic_inc(X)   { __atomic_op(X,+,1); }
-atomic_dec(X)   { __atomic_op(X,-,1); }
-atomic_andnot(V,X) { __atomic_op(X,&~,V); }
+atomic_add(V,X) { __atomic_op{noreturn}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{noreturn}(X,-,V); }
+atomic_and(V,X) { __atomic_op{noreturn}(X,&,V); }
+atomic_or(V,X)  { __atomic_op{noreturn}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{noreturn}(X,^,V); }
+atomic_inc(X)   { __atomic_op{noreturn}(X,+,1); }
+atomic_dec(X)   { __atomic_op{noreturn}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{noreturn}(X,&~,V); }
 
 atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
 atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
@@ -144,3 +144,5 @@ atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
 atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
 atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
 atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{mb}(X,V,W)
\ No newline at end of file
diff --git a/tools/memory-model/litmus-tests/add-unless-mb.litmus b/tools/memory-model/litmus-tests/add-unless-mb.litmus
new file mode 100644
index 000000000000..72f76ff3f59d
--- /dev/null
+++ b/tools/memory-model/litmus-tests/add-unless-mb.litmus
@@ -0,0 +1,27 @@
+C add_unless_mb
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that a successful atomic_add_unless
+ * acts as a full memory barrier, ensuring that *x=1 propagates to P1
+ * before P1 executes *x=2.
+ *)
+
+{}
+
+P0(atomic_t *x, atomic_t *y, atomic_t *z)
+{
+	WRITE_ONCE(*x, 1);
+	int r0 = atomic_add_unless(z,1,5);
+	WRITE_ONCE(*y, 1);
+}
+
+P1(atomic_t *x, atomic_t *y)
+{
+	int r0 = READ_ONCE(*y);
+	if (r0 == 1)
+		WRITE_ONCE(*x, 2);
+}
+
+exists (1:r0=1 /\ x=1)
-- 
2.34.1
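
For reference, with a herd7 build that understands the new switch, the model
is exercised through the updated configuration as before; a minimal sketch,
assuming it is run from tools/memory-model:

    $ cd tools/memory-model
    $ herd7 -conf linux-kernel.cfg litmus-tests/add-unless-mb.litmus

If the model is run without the lkmmv1 variant, the check added to
linux-kernel.bell is meant to fire and flag the run as
this-model-requires-variant-higher-than-lkmmv0 rather than silently ignoring
the softcoded tags.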
Re: [PATCH v3 4/5] tools/memory-model: Switch to softcoded herd7 tags
Posted by Akira Yokosawa 2 months, 1 week ago
On Thu, 19 Sep 2024 15:06:33 +0200, Jonas Oberhauser wrote:
> A new version of Herd7 provides a -lkmmv1 switch which overrides the old herd7

Why -lkmmv1?
You mean the current (unversioned) LKMM has to be called v0 ???

My preference is to call the current one v1 and your new version v2.

Either way,

Reviewed-by: Akira Yokosawa <akiyks@gmail.com>

Please find a few more comments inline below.

> behavior of simply ignoring any softcoded tags in the .def and .bell files. We
> port LKMM to this version of Herd7 by providing the switch in linux-kernel.cfg
> and reporting an error if the LKMM is used without this switch.
> 
> To preserve the semantics of LKMM, we also softcode the Noreturn tag on atomic
> RMW which do not return a value and define atomic_add_unless with an Mb tag in
> linux-kernel.def.
> 
> We update the herd-representation.txt accordingly and clarify some of the
> resulting combinations.
> 
> We also add a litmus test for atomic_add_unless which uncovered a bug in early
> iterations of the Herd7 patch that implements the new switch.
> 
> (To be) Signed-off-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>
> Signed-off by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
> ---
>  .../Documentation/herd-representation.txt     | 27 ++++++++++---------
>  tools/memory-model/linux-kernel.bell          |  3 +++
>  tools/memory-model/linux-kernel.cfg           |  1 +
>  tools/memory-model/linux-kernel.def           | 18 +++++++------
>  .../litmus-tests/add-unless-mb.litmus         | 27 +++++++++++++++++++
>  5 files changed, 56 insertions(+), 20 deletions(-)
>  create mode 100644 tools/memory-model/litmus-tests/add-unless-mb.litmus
> 
[...]

> diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
> index a12b96c547b7..4281572732bd 100644
> --- a/tools/memory-model/linux-kernel.def
> +++ b/tools/memory-model/linux-kernel.def
> @@ -63,14 +63,14 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
>  atomic_read_acquire(X) smp_load_acquire(X)
>  atomic_set_release(X,V) { smp_store_release(X,V); }
>  
> -atomic_add(V,X) { __atomic_op(X,+,V); }
> -atomic_sub(V,X) { __atomic_op(X,-,V); }
> -atomic_and(V,X) { __atomic_op(X,&,V); }
> -atomic_or(V,X)  { __atomic_op(X,|,V); }
> -atomic_xor(V,X) { __atomic_op(X,^,V); }
> -atomic_inc(X)   { __atomic_op(X,+,1); }
> -atomic_dec(X)   { __atomic_op(X,-,1); }
> -atomic_andnot(V,X) { __atomic_op(X,&~,V); }
> +atomic_add(V,X) { __atomic_op{noreturn}(X,+,V); }
> +atomic_sub(V,X) { __atomic_op{noreturn}(X,-,V); }
> +atomic_and(V,X) { __atomic_op{noreturn}(X,&,V); }
> +atomic_or(V,X)  { __atomic_op{noreturn}(X,|,V); }
> +atomic_xor(V,X) { __atomic_op{noreturn}(X,^,V); }
> +atomic_inc(X)   { __atomic_op{noreturn}(X,+,1); }
> +atomic_dec(X)   { __atomic_op{noreturn}(X,-,1); }
> +atomic_andnot(V,X) { __atomic_op{noreturn}(X,&~,V); }
>  
>  atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
>  atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
> @@ -144,3 +144,5 @@ atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
>  atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
>  atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
>  atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)
> +
> +atomic_add_unless(X,V,W) __atomic_add_unless{mb}(X,V,W)
> \ No newline at end of file

Please fix this warning.

> diff --git a/tools/memory-model/litmus-tests/add-unless-mb.litmus b/tools/memory-model/litmus-tests/add-unless-mb.litmus
> new file mode 100644
> index 000000000000..72f76ff3f59d
> --- /dev/null
> +++ b/tools/memory-model/litmus-tests/add-unless-mb.litmus
> @@ -0,0 +1,27 @@
> +C add_unless_mb
> +
> +(*
> + * Result: Never
> + *
> + * This litmus test demonstrates that a successful atomic_add_unless
> + * acts as a full memory barrier, ensuring that *x=1 propagates to P1
> + * before P1 executes *x=2.
> + *)
> +
> +{}
> +
> +P0(atomic_t *x, atomic_t *y, atomic_t *z)
> +{
> +	WRITE_ONCE(*x, 1);
> +	int r0 = atomic_add_unless(z,1,5);
> +	WRITE_ONCE(*y, 1);
> +}
> +
> +P1(atomic_t *x, atomic_t *y)
> +{
> +	int r0 = READ_ONCE(*y);
> +	if (r0 == 1)
> +		WRITE_ONCE(*x, 2);
> +}
> +
> +exists (1:r0=1 /\ x=1)

This litmus test is not compatible with klitmus7, which is much
stricter than herd7's C parser.

You can have only int or int* variables in the exists clause.
Register variables need their declarations at the top of each Pn()
(classic C).

See below for klitmus7-ready code.

And tools/memory-model/litmus-tests/README needs to mention this
litmus test.

        Thanks, Akira

---------------------------------------------
P0(int *x, int *y, atomic_t *z)
{
	int r0;

	WRITE_ONCE(*x, 1);
	r0 = atomic_add_unless(z,1,5);
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;

	r0 = READ_ONCE(*y);
	if (r0 == 1)
		WRITE_ONCE(*x, 2);
}

exists (1:r0=1 /\ x=1)
---------------------------------------------
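
For completeness, the usual way to run such a test on real hardware with
klitmus7 is roughly the following (a sketch of the standard klitmus7 flow,
assuming klitmus7 and the headers for the running kernel are installed):

    $ mkdir mymodules
    $ klitmus7 -o mymodules add-unless-mb.litmus
    $ cd mymodules ; make
    $ sudo sh run.sh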
Re: [PATCH v3 4/5] tools/memory-model: Switch to softcoded herd7 tags
Posted by Jonas Oberhauser 2 months, 1 week ago
Thanks, Akira, for your continued eagle eyes!
Will include these in the next revision.

One question below.
  jonas



Am 9/21/2024 um 4:44 AM schrieb Akira Yokosawa:
> This litmus test is not compatible with klitmus7, which is much
> stricter than herd7's C parser.
> 
> You can have only int or int* variables in the exists clause.
> Register variables need their declarations at the top of each Pn()
> (classic C).
> 
> See below for klitmus7 ready code.
> 
> And tools/memory-model/litmus-tests/README need to mention this
> litmus test.
> 
>          Thanks, Akira
> 
> ---------------------------------------------
> P0(int *x, int *y, atomic_t *z)
> {
> 	int r0;
> 
> 	WRITE_ONCE(*x, 1);
> 	r0 = atomic_add_unless(z,1,5);
> 	WRITE_ONCE(*y, 1);
> }
> 
> P1(int *x, int *y)
> {
> 	int r0;
> 
> 	r0 = READ_ONCE(*y);
> 	if (r0 == 1)
> 		WRITE_ONCE(*x, 2);
> }
> 
> exists (1:r0=1 /\ x=1)
> ---------------------------------------------
> 

Should z also be changed from atomic_t to int?
Re: [PATCH v3 4/5] tools/memory-model: Switch to softcoded herd7 tags
Posted by Akira Yokosawa 2 months, 1 week ago
Hi,

On Sat, 21 Sep 2024 09:39:05 +0200, Jonas Oberhauser wrote:
> Thanks Akira for your continued eagle eyes!
> Will include in next revision.
> 
> One question below.
>  jonas
> 
> 
> 
> Am 9/21/2024 um 4:44 AM schrieb Akira Yokosawa:
>> This litmus test is not compatible with klitmus7, which is much
>> stricter than herd7's C parser.
>>
>> You can have only int or int* variables in the exists clause.

I should have said:

   By default, you can have only int or int* variables in the exists clause.

You can find an example where an atomic_t variable is listed in its exists
clause at:

    Documentation/litmus-tests/atomic/Atomic-RMW-ops-are-atomic-WRT-atomic_set.litmus

, where the atomic_t variable is declared explicitly in the initialization
block as follows:

    {
	atomic_t v = ATOMIC_INIT(1);
    }

>> Register variables need their declarations at the top of each Pn()
>> (classic C).
>>
>> See below for klitmus7 ready code.
>>
>> And tools/memory-model/litmus-tests/README need to mention this
>> litmus test.
>>
>>          Thanks, Akira
>>
>> ---------------------------------------------
>> P0(int *x, int *y, atomic_t *z)
>> {
>>     int r0;
>>
>>     WRITE_ONCE(*x, 1);
>>     r0 = atomic_add_unless(z,1,5);
>>     WRITE_ONCE(*y, 1);
>> }
>>
>> P1(int *x, int *y)
>> {
>>     int r0;
>>
>>     r0 = READ_ONCE(*y);
>>     if (r0 == 1)
>>         WRITE_ONCE(*x, 2);
>> }
>>
>> exists (1:r0=1 /\ x=1)
>> ---------------------------------------------
>>
> 
> Should z also be changed from atomic_t to int?
>

No, it should not.
Such a change would make z incompatible with atomic_add_unless().

        Thanks, Akira
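
Following that convention, a hypothetical variant of add-unless-mb.litmus that
wanted to check z itself in the exists clause would replace the empty "{}"
initialization block with an explicit declaration:

    {
        atomic_t z = ATOMIC_INIT(0);
    }

z could then appear in the exists clause alongside the int* variables; the
posted test does not need this, since its z is zero-initialized by default and
only a register and x are checked.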
Re: [PATCH v3 4/5] tools/memory-model: Switch to softcoded herd7 tags
Posted by Hernan Ponce de Leon 2 months, 1 week ago
On 9/19/2024 3:06 PM, Jonas Oberhauser wrote:
> A new version of Herd7 provides a -lkmmv1 switch which overrides the old herd7
> behavior of simply ignoring any softcoded tags in the .def and .bell files. We
> port LKMM to this version of Herd7 by providing the switch in linux-kernel.cfg
> and reporting an error if the LKMM is used without this switch.

The changes to herd7 are ready to be merged.

     https://github.com/herd/herdtools7/pull/865

I just asked Luc to put this on hold until this series is reviewed to be 
sure no more changes are needed.

> 
> To preserve the semantics of LKMM, we also softcode the Noreturn tag on atomic
> RMW which do not return a value and define atomic_add_unless with an Mb tag in
> linux-kernel.def.
> 
> We update the herd-representation.txt accordingly and clarify some of the
> resulting combinations.
> 
> We also add a litmus test for atomic_add_unless which uncovered a bug in early
> iterations of the Herd7 patch that implements the new switch.
> 
> (To be) Signed-off-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>

Signed-off-by: Hernan Ponce de Leon <hernan.poncedeleon@huaweicloud.com>

[...]