Not all tags that are always there syntactically also provide semantic
membership in the corresponding set. For example, an 'acquire tag on a
write does not imply that the write is finally in the Acquire set and
provides acquire ordering.
To distinguish in those cases between the syntactic tags and actual
sets, we capitalize the former, so 'ACQUIRE tags may be present on both
reads and writes, but only reads will appear in the Acquire set.
For tags where the two concepts are the same we do not use specific
capitalization to make this distinction.
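
As an illustrative sketch in the style of tools/memory-model/litmus-tests/
(the test below is for exposition only and is not an existing test in that
directory), herd assigns the 'ACQUIRE tag to both the read and the write
event of the xchg_acquire(), but only the read is placed in the Acquire
set, and it is that read which forbids the r1=1 /\ r2=0 outcome:

	C MP+release+xchg_acquire

	{}

	P0(int *x, int *y)
	{
		WRITE_ONCE(*x, 1);
		smp_store_release(y, 1);
	}

	P1(int *x, int *y)
	{
		int r1;
		int r2;

		/* R and W events both carry 'ACQUIRE; only the R is in Acquire. */
		r1 = xchg_acquire(y, 2);
		r2 = READ_ONCE(*x);
	}

	exists (1:r1=1 /\ 1:r2=0)
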
Reported-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
---
tools/memory-model/linux-kernel.bell | 22 +--
tools/memory-model/linux-kernel.def | 198 +++++++++++++--------------
2 files changed, 110 insertions(+), 110 deletions(-)
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 8ae47545df97..fe65998002b9 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,18 +13,18 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
- 'release (*smp_store_release*) ||
- 'acquire (*smp_load_acquire*) ||
- 'noreturn (* R of non-return RMW *) ||
- 'mb (*xchg(),cmpxchg(),...*)
+enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
+ 'RELEASE (*smp_store_release*) ||
+ 'ACQUIRE (*smp_load_acquire*) ||
+ 'NORETURN (* R of non-return RMW *) ||
+ 'MB (*xchg(),cmpxchg(),...*)
instructions R[Accesses]
instructions W[Accesses]
instructions RMW[Accesses]
enum Barriers = 'wmb (*smp_wmb*) ||
'rmb (*smp_rmb*) ||
- 'mb (*smp_mb*) ||
+ 'MB (*smp_mb*) ||
'barrier (*barrier*) ||
'rcu-lock (*rcu_read_lock*) ||
'rcu-unlock (*rcu_read_unlock*) ||
@@ -42,10 +42,10 @@ instructions F[Barriers]
* semantic ordering, such as Acquire on a store or Mb on a failed RMW.
*)
let FailedRMW = RMW \ (domain(rmw) | range(rmw))
-let Acquire = Acquire \ W \ FailedRMW
-let Release = Release \ R \ FailedRMW
-let Mb = Mb \ FailedRMW
-let Noreturn = Noreturn \ W
+let Acquire = ACQUIRE \ W \ FailedRMW
+let Release = RELEASE \ R \ FailedRMW
+let Mb = MB \ FailedRMW
+let Noreturn = NORETURN \ W
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
@@ -85,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
-let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW |
LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index d7279a357cba..49e402782e49 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -6,18 +6,18 @@
// which appeared in ASPLOS 2018.
// ONCE
-READ_ONCE(X) __load{once}(X)
-WRITE_ONCE(X,V) { __store{once}(X,V); }
+READ_ONCE(X) __load{ONCE}(X)
+WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
// Release Acquire and friends
-smp_store_release(X,V) { __store{release}(*X,V); }
-smp_load_acquire(X) __load{acquire}(*X)
-rcu_assign_pointer(X,V) { __store{release}(X,V); }
-rcu_dereference(X) __load{once}(X)
-smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+smp_store_release(X,V) { __store{RELEASE}(*X,V); }
+smp_load_acquire(X) __load{ACQUIRE}(*X)
+rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
+rcu_dereference(X) __load{ONCE}(X)
+smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
// Fences
-smp_mb() { __fence{mb}; }
+smp_mb() { __fence{MB}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
@@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
-xchg(X,V) __xchg{mb}(X,V)
-xchg_relaxed(X,V) __xchg{once}(X,V)
-xchg_release(X,V) __xchg{release}(X,V)
-xchg_acquire(X,V) __xchg{acquire}(X,V)
-cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+xchg(X,V) __xchg{MB}(X,V)
+xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+xchg_release(X,V) __xchg{RELEASE}(X,V)
+xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
@@ -63,86 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
-atomic_add(V,X) { __atomic_op{noreturn}(X,+,V); }
-atomic_sub(V,X) { __atomic_op{noreturn}(X,-,V); }
-atomic_and(V,X) { __atomic_op{noreturn}(X,&,V); }
-atomic_or(V,X) { __atomic_op{noreturn}(X,|,V); }
-atomic_xor(V,X) { __atomic_op{noreturn}(X,^,V); }
-atomic_inc(X) { __atomic_op{noreturn}(X,+,1); }
-atomic_dec(X) { __atomic_op{noreturn}(X,-,1); }
-atomic_andnot(V,X) { __atomic_op{noreturn}(X,&~,V); }
-
-atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
-atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
-atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
-atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_fetch_and(V,X) __atomic_fetch_op{mb}(X,&,V)
-atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{once}(X,&,V)
-atomic_fetch_and_acquire(V,X) __atomic_fetch_op{acquire}(X,&,V)
-atomic_fetch_and_release(V,X) __atomic_fetch_op{release}(X,&,V)
-
-atomic_fetch_or(V,X) __atomic_fetch_op{mb}(X,|,V)
-atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{once}(X,|,V)
-atomic_fetch_or_acquire(V,X) __atomic_fetch_op{acquire}(X,|,V)
-atomic_fetch_or_release(V,X) __atomic_fetch_op{release}(X,|,V)
-
-atomic_fetch_xor(V,X) __atomic_fetch_op{mb}(X,^,V)
-atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{once}(X,^,V)
-atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{acquire}(X,^,V)
-atomic_fetch_xor_release(V,X) __atomic_fetch_op{release}(X,^,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
-atomic_add_negative_relaxed(V,X) __atomic_op_return{once}(X,+,V) < 0
-atomic_add_negative_acquire(V,X) __atomic_op_return{acquire}(X,+,V) < 0
-atomic_add_negative_release(V,X) __atomic_op_return{release}(X,+,V) < 0
-
-atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
-atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
-atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
-atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)
-
-atomic_add_unless(X,V,W) __atomic_add_unless{mb}(X,V,W)
+atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
+atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
+atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
+atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
+atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
+
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)
--
2.34.1
On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
> Not all tags that are always there syntactically also provide semantic
> membership in the corresponding set. For example, an 'acquire tag on a

Maybe:

Not all annotated accesses provide the same semantic as their syntactic
tags...

?

> write does not imply that the write is finally in the Acquire set and
> provides acquire ordering.
> 
> To distinguish in those cases between the syntactic tags and actual
> sets, we capitalize the former, so 'ACQUIRE tags may be present on both
> reads and writes, but only reads will appear in the Acquire set.
> 
> For tags where the two concepts are the same we do not use specific
> capitalization to make this distinction.
> 
> Reported-by: Boqun Feng <boqun.feng@gmail.com>
> Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>

Reviewed-by: Boqun Feng <boqun.feng@gmail.com>

I've tested this series with the following herd version:

	7.57+1, Rev: 8e7e63cd400e3eae8eac22dfdbd9dae0f243319b

all the litmus tests in the litmus-tests/ and all the litmus tests
downloaded and tested by scripts/checkghlitmus.sh (expected two tests
with smp_memb()) passed. So feel free to add:

Tested-by: Boqun Feng <boqun.feng@gmail.com>

Regards,
Boqun
On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
> On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
> > Not all tags that are always there syntactically also provide semantic
> > membership in the corresponding set. For example, an 'acquire tag on a
> 
> Maybe:
> 
> Not all annotated accesses provide the same semantic as their syntactic
> tags...
> 
> ?

Jonas, are you OK with this change?  If so, I can apply it on my next
rebase.

> > write does not imply that the write is finally in the Acquire set and
> > provides acquire ordering.
> > 
> > To distinguish in those cases between the syntactic tags and actual
> > sets, we capitalize the former, so 'ACQUIRE tags may be present on both
> > reads and writes, but only reads will appear in the Acquire set.
> > 
> > For tags where the two concepts are the same we do not use specific
> > capitalization to make this distinction.
> > 
> > Reported-by: Boqun Feng <boqun.feng@gmail.com>
> > Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
> 
> Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
> 
> I've tested this series with the following herd version:
> 
> 	7.57+1, Rev: 8e7e63cd400e3eae8eac22dfdbd9dae0f243319b
> 
> all the litmus tests in the litmus-tests/ and all the litmus tests
> downloaded and tested by scripts/checkghlitmus.sh (expected two tests
> with smp_memb()) passed. So feel free to add:
> 
> Tested-by: Boqun Feng <boqun.feng@gmail.com>

Thank you, Boqun!  I have applied your two tags to all five commits.

							Thanx, Paul

> Regards,
> Boqun
Am 10/30/2024 um 12:41 AM schrieb Paul E. McKenney:
> On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
>> On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
>>> Not all tags that are always there syntactically also provide semantic
>>> membership in the corresponding set. For example, an 'acquire tag on a
>>
>> Maybe:
>>
>> Not all annotated accesses provide the same semantic as their syntactic
>> tags...
>>
>> ?
>
> Jonas, are you OK with this change?  If so, I can apply it on my next
> rebase.

I'm ok with an extra s after semantics and a minor rephrase:

   Not all annotated accesses provide the semantics their syntactic
   tags would imply

What do you think @Boqun ?

jonas
On Wed, Oct 30, 2024 at 12:38:26PM +0100, Jonas Oberhauser wrote:
> Am 10/30/2024 um 12:41 AM schrieb Paul E. McKenney:
> > On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
> > > On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
> > > > Not all tags that are always there syntactically also provide semantic
> > > > membership in the corresponding set. For example, an 'acquire tag on a
> > > 
> > > Maybe:
> > > 
> > > Not all annotated accesses provide the same semantic as their syntactic
> > > tags...
> > > 
> > > ?
> > 
> > Jonas, are you OK with this change?  If so, I can apply it on my next
> > rebase.
> 
> I'm ok with an extra s after semantics and a minor rephrase:
> 
>    Not all annotated accesses provide the semantics their syntactic
>    tags would imply
> 
> What do you think @Boqun ?

Yes, of course! This looks good to me.

Regards,
Boqun

> jonas
On Wed, Oct 30, 2024 at 07:34:45AM -0700, Boqun Feng wrote:
> On Wed, Oct 30, 2024 at 12:38:26PM +0100, Jonas Oberhauser wrote:
> > Am 10/30/2024 um 12:41 AM schrieb Paul E. McKenney:
> > > On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
> > > > On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
> > > > > Not all tags that are always there syntactically also provide semantic
> > > > > membership in the corresponding set. For example, an 'acquire tag on a
> > > > 
> > > > Maybe:
> > > > 
> > > > Not all annotated accesses provide the same semantic as their syntactic
> > > > tags...
> > > > 
> > > > ?
> > > 
> > > Jonas, are you OK with this change?  If so, I can apply it on my next
> > > rebase.
> > 
> > I'm ok with an extra s after semantics and a minor rephrase:
> > 
> >    Not all annotated accesses provide the semantics their syntactic
> >    tags would imply
> > 
> > What do you think @Boqun ?
> 
> Yes, of course! This looks good to me.

Please see below for what I currently have.  If there are no objections
in a day or so, I will set up these five commits for the upcoming v6.13
merge window.

The additional bit pointed out by Boqun [1] can be addressed by a
separate commit.

							Thanx, Paul

[1] https://lore.kernel.org/all/ZyAmlh5GDBsqY0sZ@Boquns-Mac-mini.local/

------------------------------------------------------------------------

commit c53d54ed7e40255ea0ea66dd121672fd22423326
Author: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
Date:   Mon Sep 30 12:57:10 2024 +0200

    tools/memory-model: Distinguish between syntactic and semantic tags
    
    Not all annotated accesses provide the semantics their syntactic tags
    would imply. For example, an 'acquire tag on a write does not imply
    that the write is finally in the Acquire set and provides acquire
    ordering.
    
    To distinguish in those cases between the syntactic tags and actual
    sets, we capitalize the former, so 'ACQUIRE tags may be present on
    both reads and writes, but only reads will appear in the Acquire set.
    
    For tags where the two concepts are the same we do not use specific
    capitalization to make this distinction.
    
    Reported-by: Boqun Feng <boqun.feng@gmail.com>
    Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
    Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
    Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
    Tested-by: Boqun Feng <boqun.feng@gmail.com>
*) let FailedRMW = RMW \ (domain(rmw) | range(rmw)) -let Acquire = Acquire \ W \ FailedRMW -let Release = Release \ R \ FailedRMW -let Mb = Mb \ FailedRMW -let Noreturn = Noreturn \ W +let Acquire = ACQUIRE \ W \ FailedRMW +let Release = RELEASE \ R \ FailedRMW +let Mb = MB \ FailedRMW +let Noreturn = NORETURN \ W (* SRCU *) enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu @@ -85,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep flag ~empty different-values(srcu-rscs) as srcu-bad-value-match (* Compute marked and plain memory accesses *) -let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) | +let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW | LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock let Plain = M \ Marked diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def index d7279a357cba0..49e402782e49c 100644 --- a/tools/memory-model/linux-kernel.def +++ b/tools/memory-model/linux-kernel.def @@ -6,18 +6,18 @@ // which appeared in ASPLOS 2018. // ONCE -READ_ONCE(X) __load{once}(X) -WRITE_ONCE(X,V) { __store{once}(X,V); } +READ_ONCE(X) __load{ONCE}(X) +WRITE_ONCE(X,V) { __store{ONCE}(X,V); } // Release Acquire and friends -smp_store_release(X,V) { __store{release}(*X,V); } -smp_load_acquire(X) __load{acquire}(*X) -rcu_assign_pointer(X,V) { __store{release}(X,V); } -rcu_dereference(X) __load{once}(X) -smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; } +smp_store_release(X,V) { __store{RELEASE}(*X,V); } +smp_load_acquire(X) __load{ACQUIRE}(*X) +rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); } +rcu_dereference(X) __load{ONCE}(X) +smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; } // Fences -smp_mb() { __fence{mb}; } +smp_mb() { __fence{MB}; } smp_rmb() { __fence{rmb}; } smp_wmb() { __fence{wmb}; } smp_mb__before_atomic() { __fence{before-atomic}; } @@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; } barrier() { __fence{barrier}; } // Exchange -xchg(X,V) __xchg{mb}(X,V) -xchg_relaxed(X,V) __xchg{once}(X,V) -xchg_release(X,V) __xchg{release}(X,V) -xchg_acquire(X,V) __xchg{acquire}(X,V) -cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W) -cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W) -cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W) -cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W) +xchg(X,V) __xchg{MB}(X,V) +xchg_relaxed(X,V) __xchg{ONCE}(X,V) +xchg_release(X,V) __xchg{RELEASE}(X,V) +xchg_acquire(X,V) __xchg{ACQUIRE}(X,V) +cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W) +cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W) +cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W) +cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W) // Spinlocks spin_lock(X) { __lock(X); } @@ -63,86 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); } atomic_read_acquire(X) smp_load_acquire(X) atomic_set_release(X,V) { smp_store_release(X,V); } -atomic_add(V,X) { __atomic_op{noreturn}(X,+,V); } -atomic_sub(V,X) { __atomic_op{noreturn}(X,-,V); } -atomic_and(V,X) { __atomic_op{noreturn}(X,&,V); } -atomic_or(V,X) { __atomic_op{noreturn}(X,|,V); } -atomic_xor(V,X) { __atomic_op{noreturn}(X,^,V); } -atomic_inc(X) { __atomic_op{noreturn}(X,+,1); } -atomic_dec(X) { __atomic_op{noreturn}(X,-,1); } -atomic_andnot(V,X) { __atomic_op{noreturn}(X,&~,V); } - -atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V) -atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V) -atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V) -atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V) 
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_fetch_and(V,X) __atomic_fetch_op{mb}(X,&,V)
-atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{once}(X,&,V)
-atomic_fetch_and_acquire(V,X) __atomic_fetch_op{acquire}(X,&,V)
-atomic_fetch_and_release(V,X) __atomic_fetch_op{release}(X,&,V)
-
-atomic_fetch_or(V,X) __atomic_fetch_op{mb}(X,|,V)
-atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{once}(X,|,V)
-atomic_fetch_or_acquire(V,X) __atomic_fetch_op{acquire}(X,|,V)
-atomic_fetch_or_release(V,X) __atomic_fetch_op{release}(X,|,V)
-
-atomic_fetch_xor(V,X) __atomic_fetch_op{mb}(X,^,V)
-atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{once}(X,^,V)
-atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{acquire}(X,^,V)
-atomic_fetch_xor_release(V,X) __atomic_fetch_op{release}(X,^,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
-atomic_add_negative_relaxed(V,X) __atomic_op_return{once}(X,+,V) < 0
-atomic_add_negative_acquire(V,X) __atomic_op_return{acquire}(X,+,V) < 0
-atomic_add_negative_release(V,X) __atomic_op_return{release}(X,+,V) < 0
-
-atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
-atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
-atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
-atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)
-
-atomic_add_unless(X,V,W) __atomic_add_unless{mb}(X,V,W)
+atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
+atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
+atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
+atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
+atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
+
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)
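The effect of separating the syntactic 'ACQUIRE tag from the semantic
Acquire set is easiest to see on a failed value-returning RMW. The
litmus-test sketch below is hypothetical (it is not part of the patch;
the test name and the never-matching compare value 42 are invented for
illustration). Because P1's cmpxchg_acquire() can never succeed, its
read lands in FailedRMW and is stripped from the Acquire set by
"let Acquire = ACQUIRE \ W \ FailedRMW", so the exists-clause should be
reachable under the model:

C ACQUIRE-tag-failed-cmpxchg

(*
 * Hypothetical illustration: the compare value 42 never matches, so
 * P1's cmpxchg_acquire() always fails. Its read carries the 'ACQUIRE
 * tag syntactically but is excluded from the semantic Acquire set,
 * so nothing orders it before the READ_ONCE() of x, and r1 == 0
 * remains observable even when r0 == 1.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	smp_store_release(y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = cmpxchg_acquire(y, 42, 2);
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)

Changing the compare value so that the cmpxchg can succeed (for example
cmpxchg_acquire(y, 1, 2)) puts the successful RMW's read back into the
Acquire set, and the same exists-clause becomes forbidden.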
Am 11/5/2024 um 9:21 PM schrieb Paul E. McKenney:
> On Wed, Oct 30, 2024 at 07:34:45AM -0700, Boqun Feng wrote:
>> On Wed, Oct 30, 2024 at 12:38:26PM +0100, Jonas Oberhauser wrote:
>>> Am 10/30/2024 um 12:41 AM schrieb Paul E. McKenney:
>>>> On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
>>>>> On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
>>>>>> Not all tags that are always there syntactically also provide semantic membership in the corresponding set. For example, an 'acquire tag on a
>>>>>
>>>>> Maybe:
>>>>>
>>>>> Not all annotated accesses provide the same semantic as their syntactic tags...
>>>>>
>>>>> ?
>>>>
>>>> Jonas, are you OK with this change? If so, I can apply it on my next rebase.
>>>
>>> I'm ok with an extra s after semantics and a minor rephrase:
>>>
>>> Not all annotated accesses provide the semantics their syntactic tags would imply
>>>
>>> What do you think @Boqun ?
>>
>> Yes, of course! This looks good to me.
>
> Please see below for what I currently have. If there are no objections in a day or so, I will set up these five commits for the upcoming v6.13 merge window.
>
> The additional bit pointed out by Boqun [1] can be addressed by a separate commit.
>
> 							Thanx, Paul
>
> [1] https://lore.kernel.org/all/ZyAmlh5GDBsqY0sZ@Boquns-Mac-mini.local/

I'm confused, did I forget to add the fix to the capitalization issue discovered by Boqun to the fixed commit? I vividly remember typing git commit add ...

jonas
On Wed, Nov 06, 2024 at 11:28:28AM +0100, Jonas Oberhauser wrote:
> Am 11/5/2024 um 9:21 PM schrieb Paul E. McKenney:
> > Please see below for what I currently have. If there are no objections in a day or so, I will set up these five commits for the upcoming v6.13 merge window.
[ . . . ]
> I'm confused, did I forget to add the fix to the capitalization issue discovered by Boqun to the fixed commit? I vividly remember typing git commit add ...

It is quite possible that I have queued an old version of the patch.
Could you please check this commit on -rcu, shown below?

c53d54ed7e40 ("tools/memory-model: Distinguish between syntactic and semantic tags")

If this is the wrong one, please point me to the right one.

							Thanx, Paul

------------------------------------------------------------------------

commit c53d54ed7e40255ea0ea66dd121672fd22423326
Author: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
Date:   Mon Sep 30 12:57:10 2024 +0200

    tools/memory-model: Distinguish between syntactic and semantic tags

    Not all annotated accesses provide the semantics their syntactic tags
    would imply. For example, an 'acquire tag on a write does not imply
    that the write is finally in the Acquire set and provides acquire
    ordering.

    To distinguish in those cases between the syntactic tags and actual
    sets, we capitalize the former, so 'ACQUIRE tags may be present on
    both reads and writes, but only reads will appear in the Acquire set.

    For tags where the two concepts are the same we do not use specific
    capitalization to make this distinction.

    Reported-by: Boqun Feng <boqun.feng@gmail.com>
    Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
    Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
    Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
    Tested-by: Boqun Feng <boqun.feng@gmail.com>

[ . . . diff of linux-kernel.bell and linux-kernel.def elided; identical to the patch above . . . ]
Never mind, I found your patch in my spam folder. Apologies, and I
thought that had been fixed.

I will replace the commit with your updated patch.

							Thanx, Paul

On Wed, Nov 06, 2024 at 07:04:57AM -0800, Paul E. McKenney wrote:
> On Wed, Nov 06, 2024 at 11:28:28AM +0100, Jonas Oberhauser wrote:
> > I'm confused, did I forget to add the fix to the capitalization issue discovered by Boqun to the fixed commit? I vividly remember typing git commit add ...
>
> It is quite possible that I have queued an old version of the patch.
> Could you please check this commit on -rcu, shown below?
>
> c53d54ed7e40 ("tools/memory-model: Distinguish between syntactic and semantic tags")
>
> If this is the wrong one, please point me to the right one.
>
> 							Thanx, Paul
>
> ------------------------------------------------------------------------
>
> commit c53d54ed7e40255ea0ea66dd121672fd22423326
> Author: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
> Date:   Mon Sep 30 12:57:10 2024 +0200
>
>     tools/memory-model: Distinguish between syntactic and semantic tags
[ . . . quoted commit message and diff elided . . . ]
Am 11/6/2024 um 7:00 PM schrieb Paul E. McKenney:
> Never mind, I found your patch in my spam folder.

Glad that got cleared up. I had gotten very confused because you had also responded to the thread with the patch.

> check this commit on -rcu

For future reference, where can I find the -rcu repository? I couldn't find the commit on paulmckrcu/linux, but maybe I looked incorrectly.

Have fun,
jonas

PS:
> Apologies, and I thought that had been fixed.

One might come to the conclusion that with sufficient effort, things can be un-fixed...
Jonas, despite the CC, your message has not made my gmail mbox, not even the spam folder.
I'm replying using lore's reply link.

On Thu, 7 Nov 2024 10:05:18 +0100, Jonas Oberhauser wrote:
> Am 11/6/2024 um 7:00 PM schrieb Paul E. McKenney:
>> Never mind, I found your patch in my spam folder.
> Glad that got cleared up. I had gotten very confused because you had also responded to the thread with the patch.
>
>> check this commit on -rcu
>
> For future reference, where can I find the -rcu repository? I couldn't find the commit on paulmckrcu/linux, but maybe I looked incorrectly.

You can find your patches at:

[v4] https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git/commit/?id=c53d54ed7e40255ea0ea66dd121672fd22423326
[v5] https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git/commit/?id=9b13bea0966c498629e221c3022a591cc88d4c57

and

[v4] https://github.com/paulmckrcu/linux/commit/c53d54ed7e40255ea0ea66dd121672fd22423326
[v5] https://github.com/paulmckrcu/linux/commit/9b13bea0966c498629e221c3022a591cc88d4c57

As Paul mentioned elsewhere, the [v5] patch misses tags from Boqun at the moment.

HTH, Akira

> Have fun,
> jonas
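For anyone else tracking these trees, one way to inspect such a commit locally is to fetch the -rcu tree directly. A sketch, assuming the commit is still reachable from one of the fetched branches; the remote name paulmck-rcu is arbitrary:

    # Add the -rcu tree as a remote and fetch it:
    git remote add paulmck-rcu \
        https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
    git fetch paulmck-rcu

    # Show the queued commit, restricting the diff to the memory model:
    git show c53d54ed7e40 -- tools/memory-model/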
Am 11/7/2024 um 11:51 AM schrieb Akira Yokosawa:
> Jonas, despite the CC, your message has not made my gmail mbox, not even the spam folder.
> I'm replying using lore's reply link.
>
> As Paul mentioned elsewhere, the [v5] patch misses tags from Boqun at the moment.

So if I supply a new revision of a patch, should I include all the Reviewed-by tags from the previous revision?

I hadn't done that before (because I thought I should not add other people's tags, especially if they hadn't reviewed that specific revision), so we may be missing *a lot* of Reviewed-by tags...

If this is the case, I'll try to scour through the previous e-mails and add all the missing Reviewed-by tags.

Best wishes,
jonas
On Fri, 8 Nov 2024 10:10:48 +0100, Jonas Oberhauser wrote:
> Am 11/7/2024 um 11:51 AM schrieb Akira Yokosawa:
>> Jonas, despite the CC, your message has not made my gmail mbox, not even the spam folder.
>> I'm replying using lore's reply link.
>>
>> As Paul mentioned elsewhere, the [v5] patch misses tags from Boqun at the moment.
>
> So if I supply a new revision of a patch, should I include all the Reviewed-by tags from the previous revision?

It depends.

> I hadn't done that before (because I thought I should not add other people's tags, especially if they hadn't reviewed that specific revision), so we may be missing *a lot* of Reviewed-by tags...

Section "Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:" of Documentation/process/submitting-patches.rst has this paragraph:

    Both Tested-by and Reviewed-by tags, once received on mailing list from tester
    or reviewer, should be added by author to the applicable patches when sending
    next versions. However if the patch has changed substantially in following
    version, these tags might not be applicable anymore and thus should be removed.
    Usually removal of someone's Tested-by or Reviewed-by tags should be mentioned
    in the patch changelog (after the '---' separator).

Does this help you?

> If this is the case, I'll try to scour through the previous e-mails and add all the missing Reviewed-by tags.

Only if Paul asks you to do so ;-)

HTH, Akira
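As a purely hypothetical illustration of that guidance (names and addresses invented), a v2 posting that carries one reviewer's tag forward but drops another's might end like this, with the removal noted after the '---' separator:

    Reviewed-by: First Reviewer <first.reviewer@example.com>
    Signed-off-by: Patch Author <author@example.com>
    ---
    v2: Reworked the linux-kernel.bell hunk, so dropped Second Reviewer's
        Reviewed-by tag pending a fresh review; carried the remaining
        tags forward unchanged.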
Am 11/8/2024 um 11:12 AM schrieb Akira Yokosawa:
> On Fri, 8 Nov 2024 10:10:48 +0100, Jonas Oberhauser wrote:
>> I hadn't done that before (because I thought I should not add other people's tags, especially if they hadn't reviewed that specific revision), so we may be missing *a lot* of Reviewed-by tags...
>
> Section "Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:" of Documentation/process/submitting-patches.rst has this paragraph:
>
>     Both Tested-by and Reviewed-by tags, once received on mailing list from tester
>     or reviewer, should be added by author to the applicable patches when sending
>     next versions. However if the patch has changed substantially in following
>     version, these tags might not be applicable anymore and thus should be removed.
>     Usually removal of someone's Tested-by or Reviewed-by tags should be mentioned
>     in the patch changelog (after the '---' separator).
>
> Does this help you?

Thanks so much, it does. My apologies to everyone whose Reviewed-by tag I failed to add :(

I should have read that document more carefully.

Best wishes,
jonas
On Fri, Nov 08, 2024 at 12:07:43PM +0100, Jonas Oberhauser wrote:
> Am 11/8/2024 um 11:12 AM schrieb Akira Yokosawa:
> > Section "Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:" of Documentation/process/submitting-patches.rst has this paragraph:
[ . . . ]
> Thanks so much, it does. My apologies to everyone whose Reviewed-by tag I failed to add :(
>
> I should have read that document more carefully.

No worries! The important part is your changes. The tags are easy to add. And now you know. ;-)

							Thanx, Paul
On Fri, Nov 08, 2024 at 10:35:24AM -0800, Paul E. McKenney wrote:
> On Fri, Nov 08, 2024 at 12:07:43PM +0100, Jonas Oberhauser wrote:
[ . . . ]
> No worries! The important part is your changes. The tags are easy to add. And now you know. ;-)

The first three of your patches could go in, but the last two require a new herd7 release (for the lkmmv2 flag). Left to myself, I would hold all five until we get a new herd7 release, but if it is important to get the first three into the current merge window, please rebase them to mainline some time this week. I would then send a pull request for them next week, the second and final week of the merge window.

Over to you!

							Thanx, Paul
Am 11/18/2024 um 4:05 PM schrieb Paul E. McKenney:
> On Fri, Nov 08, 2024 at 10:35:24AM -0800, Paul E. McKenney wrote:
>> And now you know. ;-)
>
> if it is important to get the first three into the current merge window, please rebase them to mainline some time this week
>
> Over to you!

Luckily there's no need and we can wait until a new herd release.

Have a lot of fun,
jonas
On Tue, Nov 26, 2024 at 04:25:46PM +0100, Jonas Oberhauser wrote:
> Am 11/18/2024 um 4:05 PM schrieb Paul E. McKenney:
> > if it is important to get the first three into the current merge window, please rebase them to mainline some time this week
> >
> > Over to you!
>
> Luckily there's no need and we can wait until a new herd release.

Sounds good, and here is hoping for sooner rather than later.

							Thanx, Paul
On Thu, Nov 07, 2024 at 07:51:52PM +0900, Akira Yokosawa wrote:
> Jonas, despite the CC, your message has not made my gmail mbox, not even the spam folder.
> I'm replying using lore's reply link.
>
> On Thu, 7 Nov 2024 10:05:18 +0100, Jonas Oberhauser wrote:
> > For future reference, where can I find the -rcu repository? I couldn't find the commit on paulmckrcu/linux, but maybe I looked incorrectly.
>
> You can find your patches at:
>
> [v4] https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git/commit/?id=c53d54ed7e40255ea0ea66dd121672fd22423326
> [v5] https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git/commit/?id=9b13bea0966c498629e221c3022a591cc88d4c57
>
> and
>
> [v4] https://github.com/paulmckrcu/linux/commit/c53d54ed7e40255ea0ea66dd121672fd22423326
> [v5] https://github.com/paulmckrcu/linux/commit/9b13bea0966c498629e221c3022a591cc88d4c57
>
> As Paul mentioned elsewhere, the [v5] patch misses tags from Boqun at the moment.

Thank you both! Just for the record, I must sadly but emphatically agree with Jonas's point about things getting unfixed...

							Thanx, Paul
On Wed, Oct 30, 2024 at 07:34:45AM -0700, Boqun Feng wrote:
> On Wed, Oct 30, 2024 at 12:38:26PM +0100, Jonas Oberhauser wrote:
> > Am 10/30/2024 um 12:41 AM schrieb Paul E. McKenney:
> > > On Mon, Oct 28, 2024 at 05:15:46PM -0700, Boqun Feng wrote:
> > > > On Mon, Sep 30, 2024 at 12:57:10PM +0200, Jonas Oberhauser wrote:
> > > > > Not all tags that are always there syntactically also provide semantic membership in the corresponding set. For example, an 'acquire tag on a
> > > >
> > > > Maybe:
> > > >
> > > > Not all annotated accesses provide the same semantic as their syntactic tags...
> > > >
> > > > ?
> > >
> > > Jonas, are you OK with this change? If so, I can apply it on my next rebase.
> >
> > I'm ok with an extra s after semantics and a minor rephrase:
> >
> > Not all annotated accesses provide the semantics their syntactic tags would imply
> >
> > What do you think @Boqun ?
>
> Yes, of course! This looks good to me.

Very good! This is in the commit that you (Jonas) will fix, correct? If so, could you please make this update as well? Otherwise, Murphy being who he is, I will end up changing this and then overwriting my change with your updated commit. ;-)

							Thanx, Paul