The ATOMIC_FETCH_OP_AND and ATOMIC64_FETCH_OP_AND macros accept 'mb' and
'cl' parameters but never use them in their implementation. These macros
simply delegate to the corresponding andnot functions, which handle the
actual atomic operations and memory barriers.
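For example, ATOMIC_FETCH_OP_AND(_acquire, a, "memory") currently expands
to a wrapper along these lines, with neither 'a' nor '"memory"' appearing
anywhere in the result:

	static __always_inline int
	__lse_atomic_fetch_and_acquire(int i, atomic_t *v)
	{
		/* ordering and clobbers are handled by the andnot variant */
		return __lse_atomic_fetch_andnot_acquire(~i, v);
	}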
Signed-off-by: Seongsu Park <sgsu.park@samsung.com>
---
arch/arm64/include/asm/atomic_lse.h | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e55..afad1849c4cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -103,17 +103,17 @@ static __always_inline void __lse_atomic_and(int i, atomic_t *v)
return __lse_atomic_andnot(~i, v);
}
-#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
+#define ATOMIC_FETCH_OP_AND(name) \
static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_andnot##name(~i, v); \
}
-ATOMIC_FETCH_OP_AND(_relaxed, )
-ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
-ATOMIC_FETCH_OP_AND(_release, l, "memory")
-ATOMIC_FETCH_OP_AND( , al, "memory")
+ATOMIC_FETCH_OP_AND(_relaxed)
+ATOMIC_FETCH_OP_AND(_acquire)
+ATOMIC_FETCH_OP_AND(_release)
+ATOMIC_FETCH_OP_AND( )
#undef ATOMIC_FETCH_OP_AND
@@ -210,17 +210,17 @@ static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
return __lse_atomic64_andnot(~i, v);
}
-#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
+#define ATOMIC64_FETCH_OP_AND(name) \
static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_andnot##name(~i, v); \
}
-ATOMIC64_FETCH_OP_AND(_relaxed, )
-ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
-ATOMIC64_FETCH_OP_AND(_release, l, "memory")
-ATOMIC64_FETCH_OP_AND( , al, "memory")
+ATOMIC64_FETCH_OP_AND(_relaxed)
+ATOMIC64_FETCH_OP_AND(_acquire)
+ATOMIC64_FETCH_OP_AND(_release)
+ATOMIC64_FETCH_OP_AND( )
#undef ATOMIC64_FETCH_OP_AND
--
2.34.1
On Wed, 26 Nov 2025 11:10:25 +0900, Seongsu Park wrote:
> The ATOMIC_FETCH_OP_AND and ATOMIC64_FETCH_OP_AND macros accept 'mb' and
> 'cl' parameters but never use them in their implementation. These macros
> simply delegate to the corresponding andnot functions, which handle the
> actual atomic operations and memory barriers.
>
>
Applied to arm64 (for-next/misc), thanks!
[1/1] arm64: atomics: lse: Remove unused parameters from ATOMIC_FETCH_OP_AND macros
https://git.kernel.org/arm64/c/c86d9f8764ba
--
Catalin
On Wed, Nov 26, 2025 at 11:10:25AM +0900, Seongsu Park wrote:
> The ATOMIC_FETCH_OP_AND and ATOMIC64_FETCH_OP_AND macros accept 'mb' and
> 'cl' parameters but never use them in their implementation. These macros
> simply delegate to the corresponding andnot functions, which handle the
> actual atomic operations and memory barriers.
>
> Signed-off-by: Seongsu Park <sgsu.park@samsung.com>
FWIW, this was a leftover from commit:
5e9e43c987b2 ("arm64: atomics: lse: define ANDs in terms of ANDNOTs")
... where I missed the leftover macro arguments.
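For reference, that change already made the plain AND helpers thin wrappers
around their ANDNOT counterparts, as the hunk context above shows for the
32-bit case:

	static __always_inline void __lse_atomic_and(int i, atomic_t *v)
	{
		return __lse_atomic_andnot(~i, v);
	}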
AFAICT there aren't any other leftover macro arguments from that round
of asm improvements (or otherwise), so:
Acked-by: Mark Rutland <mark.rutland@arm.com>
Mark.
> ---
> arch/arm64/include/asm/atomic_lse.h | 20 ++++++++++----------
> 1 file changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
> index 87f568a94e55..afad1849c4cf 100644
> --- a/arch/arm64/include/asm/atomic_lse.h
> +++ b/arch/arm64/include/asm/atomic_lse.h
> @@ -103,17 +103,17 @@ static __always_inline void __lse_atomic_and(int i, atomic_t *v)
> return __lse_atomic_andnot(~i, v);
> }
>
> -#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
> +#define ATOMIC_FETCH_OP_AND(name) \
> static __always_inline int \
> __lse_atomic_fetch_and##name(int i, atomic_t *v) \
> { \
> return __lse_atomic_fetch_andnot##name(~i, v); \
> }
>
> -ATOMIC_FETCH_OP_AND(_relaxed, )
> -ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
> -ATOMIC_FETCH_OP_AND(_release, l, "memory")
> -ATOMIC_FETCH_OP_AND( , al, "memory")
> +ATOMIC_FETCH_OP_AND(_relaxed)
> +ATOMIC_FETCH_OP_AND(_acquire)
> +ATOMIC_FETCH_OP_AND(_release)
> +ATOMIC_FETCH_OP_AND( )
>
> #undef ATOMIC_FETCH_OP_AND
>
> @@ -210,17 +210,17 @@ static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
> return __lse_atomic64_andnot(~i, v);
> }
>
> -#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
> +#define ATOMIC64_FETCH_OP_AND(name) \
> static __always_inline long \
> __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
> { \
> return __lse_atomic64_fetch_andnot##name(~i, v); \
> }
>
> -ATOMIC64_FETCH_OP_AND(_relaxed, )
> -ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
> -ATOMIC64_FETCH_OP_AND(_release, l, "memory")
> -ATOMIC64_FETCH_OP_AND( , al, "memory")
> +ATOMIC64_FETCH_OP_AND(_relaxed)
> +ATOMIC64_FETCH_OP_AND(_acquire)
> +ATOMIC64_FETCH_OP_AND(_release)
> +ATOMIC64_FETCH_OP_AND( )
>
> #undef ATOMIC64_FETCH_OP_AND
>
> --
> 2.34.1
>