Add tif_bitset_relaxed_wait(), along with tif_need_resched_relaxed_wait()
which wraps it. It takes a thread_info bit and a timeout duration as
parameters and waits until the bit is set or the timeout expires.
The wait is implemented via smp_cond_load_relaxed_timeout(), which
essentially provides the pattern used in poll_idle(): spin in a loop
waiting for the flag to change, until a timeout occurs.
tif_need_resched_relaxed_wait() allows callers to abstract out the
internals of the wait, scheduler-specific details, etc., as in the
sketch below.
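
For illustration, a poll_idle()-style caller (a hypothetical sketch, not
part of this patch; function and parameter names are made up) could look
roughly like:

	/* Called with preemption disabled, as in the idle loop. */
	static int example_poll(s64 timeout_ns)
	{
		/* Spin until need-resched is set or the timeout expires. */
		if (tif_need_resched_relaxed_wait(timeout_ns))
			return 1;	/* TIF_NEED_RESCHED was set */

		return 0;		/* timed out */
	}
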
The helpers are placed in <linux/sched/idle.h> instead of
<linux/thread_info.h> to work around recursive include hell.
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: linux-pm@vger.kernel.org
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
include/linux/sched/idle.h | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 8465ff1f20d1..6780ad760abb 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_IDLE_H
#include <linux/sched.h>
+#include <linux/sched/clock.h>
enum cpu_idle_type {
__CPU_NOT_IDLE = 0,
@@ -113,4 +114,32 @@ static __always_inline void current_clr_polling(void)
}
#endif
+/*
+ * Caller needs to make sure that the thread context cannot be preempted
+ * or migrated, so current_thread_info() cannot change from under us.
+ *
+ * This also allows us to safely stay in the local_clock domain.
+ */
+static inline bool tif_bitset_relaxed_wait(int bit, s64 timeout_ns)
+{
+	unsigned long flags;
+
+	flags = smp_cond_load_relaxed_timeout(&current_thread_info()->flags,
+					      (VAL & BIT(bit)),
+					      (s64)local_clock_noinstr(),
+					      timeout_ns);
+	return flags & BIT(bit);
+}
+
+/**
+ * tif_need_resched_relaxed_wait() - Wait for TIF_NEED_RESCHED to be set,
+ * with no ordering guarantees, until the timeout expires.
+ *
+ * @timeout_ns: timeout in nanoseconds.
+ */
+static inline bool tif_need_resched_relaxed_wait(s64 timeout_ns)
+{
+ return tif_bitset_relaxed_wait(TIF_NEED_RESCHED, timeout_ns);
+}
+
#endif /* _LINUX_SCHED_IDLE_H */
--
2.31.1