Annotate atomic_add_return() and atomic_sub_return() to avoid signed
overflow instrumentation. They are expected to wrap around.
Signed-off-by: Kees Cook <keescook@chromium.org>
---
Cc: Will Deacon <will@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
---
arch/arm64/include/asm/atomic_lse.h | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e55..a33576b20b52 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -10,6 +10,8 @@
#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H
+#include <linux/overflow.h>
+
#define ATOMIC_OP(op, asm_op) \
static __always_inline void \
__lse_atomic_##op(int i, atomic_t *v) \
@@ -82,13 +84,13 @@ ATOMIC_FETCH_OP_SUB( )
static __always_inline int \
__lse_atomic_add_return##name(int i, atomic_t *v) \
{ \
- return __lse_atomic_fetch_add##name(i, v) + i; \
+ return wrapping_add(int, __lse_atomic_fetch_add##name(i, v), i);\
} \
\
static __always_inline int \
__lse_atomic_sub_return##name(int i, atomic_t *v) \
{ \
- return __lse_atomic_fetch_sub(i, v) - i; \
+ return wrapping_sub(int, __lse_atomic_fetch_sub(i, v), i); \
}
ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
@@ -189,13 +191,13 @@ ATOMIC64_FETCH_OP_SUB( )
static __always_inline long \
__lse_atomic64_add_return##name(s64 i, atomic64_t *v) \
{ \
- return __lse_atomic64_fetch_add##name(i, v) + i; \
+ return wrapping_add(s64, __lse_atomic64_fetch_add##name(i, v), i); \
} \
\
static __always_inline long \
__lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
{ \
- return __lse_atomic64_fetch_sub##name(i, v) - i; \
+ return wrapping_sub(s64, __lse_atomic64_fetch_sub##name(i, v), i); \
}
ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
--
2.34.1
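
For context on the helpers the diff switches to: wrapping_add() and
wrapping_sub() come from include/linux/overflow.h and perform ordinary
two's-complement arithmetic without tripping the signed-overflow
sanitizer. Below is a minimal sketch of how such helpers can be built
on the GCC/Clang overflow builtins; it is an approximation for
illustration, not necessarily the kernel's exact definitions:

	/*
	 * Illustrative sketch only; see include/linux/overflow.h for the
	 * real definitions. The __builtin_*_overflow() builtins store the
	 * wrapped (two's-complement) result and are not instrumented by
	 * the signed-overflow sanitizer.
	 */
	#define wrapping_add(type, a, b)			\
		({						\
			type __val;				\
			__builtin_add_overflow(a, b, &__val);	\
			__val;					\
		})

	#define wrapping_sub(type, a, b)			\
		({						\
			type __val;				\
			__builtin_sub_overflow(a, b, &__val);	\
			__val;					\
		})

With helpers like these, wrapping_add(int, INT_MAX, 1) quietly evaluates
to INT_MIN, whereas the open-coded INT_MAX + 1 would be reported when
building with -fsanitize=signed-integer-overflow.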
On Wed, Apr 24, 2024 at 12:17:35PM -0700, Kees Cook wrote:
> Annotate atomic_add_return() and atomic_sub_return() to avoid signed
> overflow instrumentation. They are expected to wrap around.
>
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
> Cc: Will Deacon <will@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Boqun Feng <boqun.feng@gmail.com>
> Cc: Mark Rutland <mark.rutland@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: linux-arm-kernel@lists.infradead.org
> ---
>  arch/arm64/include/asm/atomic_lse.h | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)

How come the ll/sc routines (in atomic_ll_sc.h) don't need the same
treatment? If that's just an oversight, then maybe it's better to
instrument the higher-level wrappers in asm/atomic.h?

Will
On Thu, May 02, 2024 at 12:21:28PM +0100, Will Deacon wrote:
> On Wed, Apr 24, 2024 at 12:17:35PM -0700, Kees Cook wrote:
> > Annotate atomic_add_return() and atomic_sub_return() to avoid signed
> > overflow instrumentation. They are expected to wrap around.
> >
> > Signed-off-by: Kees Cook <keescook@chromium.org>
> > ---
> > Cc: Will Deacon <will@kernel.org>
> > Cc: Peter Zijlstra <peterz@infradead.org>
> > Cc: Boqun Feng <boqun.feng@gmail.com>
> > Cc: Mark Rutland <mark.rutland@arm.com>
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > Cc: linux-arm-kernel@lists.infradead.org
> > ---
> >  arch/arm64/include/asm/atomic_lse.h | 10 ++++++----
> >  1 file changed, 6 insertions(+), 4 deletions(-)
>
> How come the ll/sc routines (in atomic_ll_sc.h) don't need the same
> treatment? If that's just an oversight, then maybe it's better to
> instrument the higher-level wrappers in asm/atomic.h?

Those are all written in asm, so there's no open-coded C arithmetic
that the sanitizers will notice. All is well there! :)

--
Kees Cook
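
To make that point concrete: the LSE *_return helpers do their final
add/sub as open-coded C, which -fsanitize=signed-integer-overflow
instruments, while the ll/sc routines compute the result entirely
inside an asm statement, which the sanitizer cannot see into. A rough
illustration follows (hypothetical helpers, not the kernel's actual
code):

	/*
	 * Rough illustration only. Arithmetic written in C is
	 * instrumented by UBSAN; arithmetic done inside asm() is not.
	 */
	static inline int add_in_c(int a, int b)
	{
		return a + b;		/* UBSAN flags signed overflow here */
	}

	static inline int add_in_asm(int a, int b)
	{
		int res;

		/* AArch64: the add happens in the asm, opaque to UBSAN */
		asm("add %w0, %w1, %w2" : "=r" (res) : "r" (a), "r" (b));
		return res;
	}

This mirrors why only atomic_lse.h needed wrapping_add()/wrapping_sub():
its *_return helpers add or subtract in C after the asm-based fetch op,
whereas atomic_ll_sc.h keeps the whole operation inside the asm block.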