From nobody Fri Feb 13 13:28:07 2026
From: Jonas Oberhauser
To: paulmck@kernel.org
Cc: stern@rowland.harvard.edu, parri.andrea@gmail.com, will@kernel.org,
	peterz@infradead.org, boqun.feng@gmail.com, npiggin@gmail.com,
	dhowells@redhat.com, j.alglave@ucl.ac.uk, luc.maranget@inria.fr,
	akiyks@gmail.com, dlustig@nvidia.com, joel@joelfernandes.org,
	urezki@gmail.com, quic_neeraju@quicinc.com, frederic@kernel.org,
	linux-kernel@vger.kernel.org, Jonas Oberhauser
Subject: [RFC][PATCH 1/4] tools/memory-model: Legitimize current use of tags in LKMM macros
Date: Mon, 27 May 2024 17:22:50 +0200
Message-Id: <20240527152253.195956-2-jonas.oberhauser@huaweicloud.com>
In-Reply-To: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>
References: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>

The current macros in linux-kernel.def reference instructions such as
__xchg{mb} or __cmpxchg{acquire}, which are invalid combinations of
tags and instructions according to the declarations in
linux-kernel.bell.  This works with current herd7 only because herd7
removes these tags anyway and does not actually enforce the validity
of tag/instruction combinations at all.

If a future herd7 version no longer applies these hardcoded
transformations, then all currently invalid combinations will actually
appear on some instruction.  We therefore adjust the declarations to
make the resulting combinations valid, by adding the 'mb tag to the
set of Accesses and by allowing all Accesses to appear on all read,
write, and RMW instructions.
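
For instance (an illustration only, not part of this patch), any litmus
test that uses xchg() exercises one of these combinations:
linux-kernel.def maps xchg() to __xchg{mb}, while 'mb was not declared
as an Access at all before this change.  The test name and shape below
are made up for the sketch:

  C xchg-mb-tag-sketch

  (*
   * Minimal sketch.  xchg() expands to __xchg{mb} (linux-kernel.def),
   * so an 'mb tag shows up on the resulting RMW even though 'mb was
   * not part of the Accesses enum in linux-kernel.bell until now.
   *)

  {}

  P0(int *x)
  {
  	int r0;

  	r0 = xchg(x, 1);
  }

  exists (0:r0=0)
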
Signed-off-by: Jonas Oberhauser
---
 tools/memory-model/linux-kernel.bell | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index ce068700939c..1b2444cdf8d1 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -16,10 +16,11 @@
 enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
 	'release (*smp_store_release*) ||
 	'acquire (*smp_load_acquire*) ||
-	'noreturn (* R of non-return RMW *)
-instructions R[{'once,'acquire,'noreturn}]
-instructions W[{'once,'release}]
-instructions RMW[{'once,'acquire,'release}]
+	'noreturn (* R of non-return RMW *) ||
+	'mb (*xchg(),compare_exchange(),...*)
+instructions R[Accesses]
+instructions W[Accesses]
+instructions RMW[Accesses]
 
 enum Barriers = 'wmb (*smp_wmb*) ||
 	'rmb (*smp_rmb*) ||
-- 
2.34.1
From nobody Fri Feb 13 13:28:07 2026
From: Jonas Oberhauser
To: paulmck@kernel.org
Cc: stern@rowland.harvard.edu, parri.andrea@gmail.com, will@kernel.org,
	peterz@infradead.org, boqun.feng@gmail.com, npiggin@gmail.com,
	dhowells@redhat.com, j.alglave@ucl.ac.uk, luc.maranget@inria.fr,
	akiyks@gmail.com, dlustig@nvidia.com, joel@joelfernandes.org,
	urezki@gmail.com, quic_neeraju@quicinc.com, frederic@kernel.org,
	linux-kernel@vger.kernel.org, Jonas Oberhauser
Subject: [RFC][PATCH 2/4] tools/memory-model: Define applicable tags on operation in tools/...
Date: Mon, 27 May 2024 17:22:51 +0200
Message-Id: <20240527152253.195956-3-jonas.oberhauser@huaweicloud.com>
In-Reply-To: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>
References: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>

Herd7 transforms reads, writes, and read-modify-writes by eliminating
'acquire tags from writes, 'release tags from reads, and 'acquire,
'release, and 'mb tags from failed read-modify-writes.  We emulate
this behavior by redefining the Acquire, Release, and Mb sets in
linux-kernel.bell to explicitly exclude those combinations.
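
For example, the 'acquire tag of a failed cmpxchg_acquire() is
dropped, so a failed RMW provides no acquire ordering; the
redefinition of Acquire is meant to preserve exactly that behavior.
A sketch of a litmus test exercising this case (illustration only,
not part of the patch; name and shape are made up):

  C MP+rel+cmpxchgacqfail-sketch

  (*
   * Minimal sketch.  P1's cmpxchg_acquire() always fails (y is never
   * 2), so its 'acquire tag is removed and the final state
   * 1:r0=1 /\ 1:r1=0 should remain reachable, just as with the
   * hardcoded tag removal in current herd7.
   *)

  {}

  P0(int *x, int *y)
  {
  	WRITE_ONCE(*x, 1);
  	smp_store_release(y, 1);
  }

  P1(int *x, int *y)
  {
  	int r0;
  	int r1;

  	r0 = cmpxchg_acquire(y, 2, 3);
  	r1 = READ_ONCE(*x);
  }

  exists (1:r0=1 /\ 1:r1=0)
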
Herd7 furthermore adds a 'noreturn tag to certain reads.  Currently
herd7 does not allow specifying the 'noreturn tag manually, but such a
manual declaration (e.g., through a syntax such as
__atomic_op{noreturn}) would add invalid 'noreturn tags to writes; in
preparation, we already exclude this combination as well.

Signed-off-by: Jonas Oberhauser
---
 tools/memory-model/linux-kernel.bell | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 1b2444cdf8d1..08fa1ccb1328 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -36,6 +36,13 @@ enum Barriers = 'wmb (*smp_wmb*) ||
 	'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
 instructions F[Barriers]
 
+(* Remove impossible tags, such as Acquire on a store or failed RMW *)
+let FailedRMW = RMW \ (domain(rmw) | range(rmw))
+let Acquire = Acquire \ W \ FailedRMW
+let Release = Release \ R \ FailedRMW
+let Mb = Mb \ FailedRMW
+let Noreturn = Noreturn \ W
+
 (* SRCU *)
 enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
 instructions SRCU[SRCU]
@@ -82,4 +89,4 @@ let Plain = M \ Marked
 let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
 let addr = carry-dep ; addr
 let ctrl = carry-dep ; ctrl
-let data = carry-dep ; data
+let data = carry-dep ; data
\ No newline at end of file
-- 
2.34.1
From nobody Fri Feb 13 13:28:07 2026
From: Jonas Oberhauser
To: paulmck@kernel.org
Cc: stern@rowland.harvard.edu, parri.andrea@gmail.com, will@kernel.org,
	peterz@infradead.org, boqun.feng@gmail.com, npiggin@gmail.com,
	dhowells@redhat.com, j.alglave@ucl.ac.uk, luc.maranget@inria.fr,
	akiyks@gmail.com, dlustig@nvidia.com, joel@joelfernandes.org,
	urezki@gmail.com, quic_neeraju@quicinc.com, frederic@kernel.org,
	linux-kernel@vger.kernel.org, Jonas Oberhauser, Viktor Vafeiadis
Subject: [RFC][PATCH 3/4] tools/memory-model: Define effect of Mb tags on RMWs in tools/...
Date: Mon, 27 May 2024 17:22:52 +0200
Message-Id: <20240527152253.195956-4-jonas.oberhauser@huaweicloud.com>
In-Reply-To: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>
References: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>

Herd7 transforms successful RMWs that carry Mb tags by inserting
smp_mb() fences around them.  We emulate this by considering imaginary
po edges just before the RMW read and just after the RMW write, and by
extending the smp_mb() ordering rule, which currently applies only to
the real po edges that would surround an actually inserted smp_mb(),
to also cover these merely imagined po edges.
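
The intended net effect is unchanged: a successful full-barrier RMW
still orders its surrounding accesses the way smp_mb() would.  As an
illustration (not part of the patch; the test name and shape are made
up), the xchg() in the store-buffering sketch below should keep the
listed outcome forbidden:

  C SB+xchgmb+mb-sketch

  (*
   * Minimal sketch.  The successful xchg() on P0 carries Mb tags on
   * its read and write, so it should order the WRITE_ONCE() before
   * the READ_ONCE() just as an smp_mb() would, keeping the state
   * 0:r1=0 /\ 1:r2=0 unreachable.
   *)

  {}

  P0(int *x, int *y, int *s)
  {
  	int r0;
  	int r1;

  	WRITE_ONCE(*x, 1);
  	r0 = xchg(s, 1);
  	r1 = READ_ONCE(*y);
  }

  P1(int *x, int *y)
  {
  	int r2;

  	WRITE_ONCE(*y, 1);
  	smp_mb();
  	r2 = READ_ONCE(*x);
  }

  exists (0:r1=0 /\ 1:r2=0)
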
Reported-by: Viktor Vafeiadis
Suggested-by: Alan Stern
Signed-off-by: Jonas Oberhauser
---
 tools/memory-model/linux-kernel.cat | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index adf3c4f41229..3a6e9677abe4 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -34,6 +34,15 @@ let R4rmb = R \ Noreturn (* Reads for which rmb works *)
 let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
 let wmb = [W] ; fencerel(Wmb) ; [W]
 let mb = ([M] ; fencerel(Mb) ; [M]) |
+	(* full-barrier RMWs (successful cmpxchg(), xchg(), etc.) act as
+	 * though they were enclosed by smp_mb().
+	 * The effect of these virtual smp_mb() is formalized by adding
+	 * Mb tags to the read and write of the operation, and providing
+	 * the same ordering as though there were additional po edges
+	 * between the Mb tag and the read resp. write.
+	 *)
+	([M] ; po ; [Mb] ; (* po ; *) [R]) |
+	([W] ; (* po ; *) [Mb] ; po ; [M]) |
 	([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
 	([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
 	([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
-- 
2.34.1
From nobody Fri Feb 13 13:28:07 2026
From: Jonas Oberhauser
To: paulmck@kernel.org
Cc: stern@rowland.harvard.edu, parri.andrea@gmail.com, will@kernel.org,
	peterz@infradead.org, boqun.feng@gmail.com, npiggin@gmail.com,
	dhowells@redhat.com, j.alglave@ucl.ac.uk, luc.maranget@inria.fr,
	akiyks@gmail.com, dlustig@nvidia.com, joel@joelfernandes.org,
	urezki@gmail.com, quic_neeraju@quicinc.com, frederic@kernel.org,
	linux-kernel@vger.kernel.org, Jonas Oberhauser
Subject: [RFC][PATCH 4/4] tools/memory-model: Distinguish between syntactic and semantic tags
Date: Mon, 27 May 2024 17:22:53 +0200
Message-Id: <20240527152253.195956-5-jonas.oberhauser@huaweicloud.com>
In-Reply-To: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>
References: <20240527152253.195956-1-jonas.oberhauser@huaweicloud.com>

Not every tag that is present syntactically also provides semantic
membership in the corresponding set.  For example, an 'acquire tag on
a write does not imply that the write ends up in the Acquire set and
provides acquire ordering.

To distinguish between the syntactic tags and the actual sets in those
cases, we capitalize the former: 'ACQUIRE tags may be present on both
reads and writes, but only reads will appear in the Acquire set.  For
tags where the two concepts coincide, we do not use capitalization to
make this distinction.
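
The renaming is purely syntactic, so existing litmus tests should keep
their verdicts.  For example (illustration only, not part of the
patch), the classic message-passing pattern below now goes through
__store{RELEASE} and __load{ACQUIRE} instead of __store{release} and
__load{acquire}, and its bad outcome should remain forbidden:

  C MP+rel+acq-sketch

  (*
   * Minimal sketch.  smp_store_release() and smp_load_acquire() now
   * expand to __store{RELEASE}(*X,V) and __load{ACQUIRE}(*X); the
   * Release and Acquire sets are derived from the capitalized tags,
   * so 1:r0=1 /\ 1:r1=0 should stay unreachable.
   *)

  {}

  P0(int *x, int *y)
  {
  	WRITE_ONCE(*x, 1);
  	smp_store_release(y, 1);
  }

  P1(int *x, int *y)
  {
  	int r0;
  	int r1;

  	r0 = smp_load_acquire(y);
  	r1 = READ_ONCE(*x);
  }

  exists (1:r0=1 /\ 1:r1=0)
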
Reported-by: Boqun Feng
Signed-off-by: Jonas Oberhauser
---
 tools/memory-model/linux-kernel.bell |  22 ++--
 tools/memory-model/linux-kernel.def  | 176 +++++++++++++--------------
 2 files changed, 99 insertions(+), 99 deletions(-)

diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 08fa1ccb1328..045d94a8eabf 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,18 +13,18 @@
 
 "Linux-kernel memory consistency model"
 
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
-	'release (*smp_store_release*) ||
-	'acquire (*smp_load_acquire*) ||
-	'noreturn (* R of non-return RMW *) ||
-	'mb (*xchg(),compare_exchange(),...*)
+enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
+	'RELEASE (*smp_store_release*) ||
+	'ACQUIRE (*smp_load_acquire*) ||
+	'NORETURN (* R of non-return RMW *) ||
+	'MB (*xchg(),compare_exchange(),...*)
 instructions R[Accesses]
 instructions W[Accesses]
 instructions RMW[Accesses]
 
 enum Barriers = 'wmb (*smp_wmb*) ||
 	'rmb (*smp_rmb*) ||
-	'mb (*smp_mb*) ||
+	'MB (*smp_mb*) ||
 	'barrier (*barrier*) ||
 	'rcu-lock (*rcu_read_lock*) ||
 	'rcu-unlock (*rcu_read_unlock*) ||
@@ -38,10 +38,10 @@ instructions F[Barriers]
 
 (* Remove impossible tags, such as Acquire on a store or failed RMW *)
 let FailedRMW = RMW \ (domain(rmw) | range(rmw))
-let Acquire = Acquire \ W \ FailedRMW
-let Release = Release \ R \ FailedRMW
-let Mb = Mb \ FailedRMW
-let Noreturn = Noreturn \ W
+let Acquire = ACQUIRE \ W \ FailedRMW
+let Release = RELEASE \ R \ FailedRMW
+let Mb = MB \ FailedRMW
+let Noreturn = NORETURN \ W
 
 (* SRCU *)
 enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
@@ -81,7 +81,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
 flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
 
 (* Compute marked and plain memory accesses *)
-let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | domain(rmw) | range(rmw) |
 	LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
 let Plain = M \ Marked
 
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index a12b96c547b7..001366ff3fb4 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -6,18 +6,18 @@
 // which appeared in ASPLOS 2018.
 
 // ONCE
-READ_ONCE(X) __load{once}(X)
-WRITE_ONCE(X,V) { __store{once}(X,V); }
+READ_ONCE(X) __load{ONCE}(X)
+WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
 
 // Release Acquire and friends
-smp_store_release(X,V) { __store{release}(*X,V); }
-smp_load_acquire(X) __load{acquire}(*X)
-rcu_assign_pointer(X,V) { __store{release}(X,V); }
-rcu_dereference(X) __load{once}(X)
-smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+smp_store_release(X,V) { __store{RELEASE}(*X,V); }
+smp_load_acquire(X) __load{ACQUIRE}(*X)
+rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
+rcu_dereference(X) __load{ONCE}(X)
+smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
 
 // Fences
-smp_mb() { __fence{mb}; }
+smp_mb() { __fence{MB}; }
 smp_rmb() { __fence{rmb}; }
 smp_wmb() { __fence{wmb}; }
 smp_mb__before_atomic() { __fence{before-atomic}; }
@@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
 barrier() { __fence{barrier}; }
 
 // Exchange
-xchg(X,V) __xchg{mb}(X,V)
-xchg_relaxed(X,V) __xchg{once}(X,V)
-xchg_release(X,V) __xchg{release}(X,V)
-xchg_acquire(X,V) __xchg{acquire}(X,V)
-cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+xchg(X,V) __xchg{MB}(X,V)
+xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+xchg_release(X,V) __xchg{RELEASE}(X,V)
+xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
 
 // Spinlocks
 spin_lock(X) { __lock(X); }
@@ -72,75 +72,75 @@ atomic_inc(X) { __atomic_op(X,+,1); }
 atomic_dec(X) { __atomic_op(X,-,1); }
 atomic_andnot(V,X) { __atomic_op(X,&~,V); }
 
-atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
-atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
-atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
-atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_fetch_and(V,X) __atomic_fetch_op{mb}(X,&,V)
-atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{once}(X,&,V)
-atomic_fetch_and_acquire(V,X) __atomic_fetch_op{acquire}(X,&,V)
-atomic_fetch_and_release(V,X) __atomic_fetch_op{release}(X,&,V)
-
-atomic_fetch_or(V,X) __atomic_fetch_op{mb}(X,|,V)
-atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{once}(X,|,V)
-atomic_fetch_or_acquire(V,X) __atomic_fetch_op{acquire}(X,|,V)
-atomic_fetch_or_release(V,X) __atomic_fetch_op{release}(X,|,V)
-
-atomic_fetch_xor(V,X) __atomic_fetch_op{mb}(X,^,V)
-atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{once}(X,^,V)
-atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{acquire}(X,^,V)
-atomic_fetch_xor_release(V,X) __atomic_fetch_op{release}(X,^,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
-atomic_add_negative_relaxed(V,X) __atomic_op_return{once}(X,+,V) < 0
-atomic_add_negative_acquire(V,X) __atomic_op_return{acquire}(X,+,V) < 0
-atomic_add_negative_release(V,X) __atomic_op_return{release}(X,+,V) < 0
-
-atomic_fetch_andnot(V,X) __atomic_fetch_op{mb}(X,&~,V)
-atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{acquire}(X,&~,V)
-atomic_fetch_andnot_release(V,X) __atomic_fetch_op{release}(X,&~,V)
-atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{once}(X,&~,V)
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
-- 
2.34.1