[RFC v4 5/5] atomic: Add option for weaker alignment check

Posted by Finn Thain 3 months, 2 weeks ago
Add a new Kconfig symbol to make CONFIG_DEBUG_ATOMIC more useful on
architectures that do not align dynamic allocations to 8-byte
boundaries. Without it, the natural-alignment check in
CONFIG_DEBUG_ATOMIC produces excessive WARN splats on those
architectures.
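
As an illustration (a userspace sketch only, not kernel code; the
4-byte allocation alignment and the 4-byte largest scalar alignment
are assumptions about such an architecture, not values taken from any
particular port), an 8-byte atomic variable that lands on a 4-byte
boundary trips the natural-alignment check, but passes once the mask
is capped at the largest scalar alignment:

	#include <stdio.h>

	int main(void)
	{
		/* 4-byte aligned address, as a weakly aligned allocation might yield */
		unsigned long addr = 0x1004;
		unsigned int size = 8;		/* e.g. a 64-bit atomic access */
		unsigned int largest_align = 4;	/* assumed largest scalar alignment */

		unsigned int natural_mask = size - 1;			/* 0x7 */
		unsigned int capped_mask = natural_mask & (largest_align - 1); /* 0x3 */

		printf("natural check warns: %s\n", (addr & natural_mask) ? "yes" : "no");
		printf("capped check warns:  %s\n", (addr & capped_mask) ? "yes" : "no");
		return 0;
	}

For accesses no wider than the largest scalar alignment, the capped
mask equals the natural mask, so the check behaves as before; only
wider accesses see a relaxed check.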
---
Changed since v3:
 - Dropped #include <linux/cache.h> to avoid a build failure on arm64.
 - Rewrote Kconfig help text to better describe preferred alignment.
 - Refactored to avoid line-wrapping and duplication.
---
 include/linux/instrumented.h | 17 ++++++++++++++---
 lib/Kconfig.debug            |  8 ++++++++
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 402a999a0d6b..ca946c5860be 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -56,6 +56,17 @@ static __always_inline void instrument_read_write(const volatile void *v, size_t
 	kcsan_check_read_write(v, size);
 }
 
+static __always_inline void instrument_atomic_check_alignment(const volatile void *v, size_t size)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_ATOMIC)) {
+		unsigned int mask = size - 1;
+
+		if (IS_ENABLED(CONFIG_DEBUG_ATOMIC_LARGEST_ALIGN))
+			mask &= sizeof(struct { long x; } __aligned_largest) - 1;
+		WARN_ON_ONCE((unsigned long)v & mask);
+	}
+}
+
 /**
  * instrument_atomic_read - instrument atomic read access
  * @v: address of access
@@ -68,7 +79,7 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_
 {
 	kasan_check_read(v, size);
 	kcsan_check_atomic_read(v, size);
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1)));
+	instrument_atomic_check_alignment(v, size);
 }
 
 /**
@@ -83,7 +94,7 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
 {
 	kasan_check_write(v, size);
 	kcsan_check_atomic_write(v, size);
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1)));
+	instrument_atomic_check_alignment(v, size);
 }
 
 /**
@@ -98,7 +109,7 @@ static __always_inline void instrument_atomic_read_write(const volatile void *v,
 {
 	kasan_check_write(v, size);
 	kcsan_check_atomic_read_write(v, size);
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ATOMIC) && ((unsigned long)v & (size - 1)));
+	instrument_atomic_check_alignment(v, size);
 }
 
 /**
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1c7e30cdfe04..4d3ebe501d17 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1373,6 +1373,14 @@ config DEBUG_ATOMIC
 
 	  This option has potentially significant overhead.
 
+config DEBUG_ATOMIC_LARGEST_ALIGN
+	bool "Check alignment only up to __aligned_largest"
+	depends on DEBUG_ATOMIC
+	help
+	  If you say Y here then the check for natural alignment of
+	  atomic accesses will be constrained to the compiler's largest
+	  alignment for scalar types.
+
 menu "Lock Debugging (spinlocks, mutexes, etc...)"
 
 config LOCK_DEBUGGING_SUPPORT
-- 
2.49.1