Support waiting in smp_cond_load_relaxed_timeout() via
__cmpwait_relaxed(). To ensure that we wake from waiting in
WFE periodically and don't block forever if there are no stores
to ptr, this path is only used when the event-stream is enabled.
Note that when using __cmpwait_relaxed() we ignore the timeout
value, allowing an overshoot by up to the event-stream period.
And, in the unlikely event that the event-stream is unavailable,
fall back to spin-waiting.
Also set SMP_TIMEOUT_POLL_COUNT to 1 so we do the time-check in
each iteration of smp_cond_load_relaxed_timeout().
And finally define ARCH_HAS_CPU_RELAX to indicate that we have
an optimized implementation of cpu_poll_relax().
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Suggested-by: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
Note:
- Add the missing config ARCH_HAS_CPU_RELAX in arch/arm64/Kconfig.
(Catalin Marinas).
arch/arm64/Kconfig | 3 +++
arch/arm64/include/asm/barrier.h | 21 +++++++++++++++++++++
2 files changed, 24 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 38dba5f7e4d2..a3b42d9beed5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1627,6 +1627,9 @@ config ARCH_SUPPORTS_CRASH_DUMP
config ARCH_DEFAULT_CRASH_DUMP
def_bool y
+config ARCH_HAS_CPU_RELAX
+ def_bool y
+
config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
def_bool CRASH_RESERVE
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9495c4441a46..6190e178db51 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -12,6 +12,7 @@
#include <linux/kasan-checks.h>
#include <asm/alternative-macros.h>
+#include <asm/vdso/processor.h>
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
@@ -219,6 +220,26 @@ do { \
(typeof(*ptr))VAL; \
})
+/* Re-declared here to avoid include dependency. */
+extern bool arch_timer_evtstrm_available(void);
+
+/*
+ * In the common case, cpu_poll_relax() sits waiting in __cmpwait_relaxed()
+ * for the ptr value to change.
+ *
+ * Since this period is reasonably long, choose SMP_TIMEOUT_POLL_COUNT
+ * to be 1, so smp_cond_load_{relaxed,acquire}_timeout() does a
+ * time-check in each iteration.
+ */
+#define SMP_TIMEOUT_POLL_COUNT 1
+
+#define cpu_poll_relax(ptr, val, timeout_ns) do { \
+ if (arch_timer_evtstrm_available()) \
+ __cmpwait_relaxed(ptr, val); \
+ else \
+ cpu_relax(); \
+} while (0)
+
#include <asm-generic/barrier.h>
#endif /* __ASSEMBLER__ */
--
2.31.1