The generic entry code has the same logic as the need_irq_preemption()
macro and uses a helper function to check the remaining resched
conditions.

In preparation for moving arm64 over to the generic entry code, expand
the need_irq_preemption() check in place ahead of time and extract the
arm64 resched check into a helper function.

No functional changes.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
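For reference, the generic entry counterpart in kernel/entry/common.c
looks roughly like the sketch below (abridged, not part of this patch).
The arch_irqentry_exit_need_resched() hook is the helper mentioned in
the changelog, where an architecture-specific resched check such as
arm64_need_resched() is expected to be wired in by the rest of this
series:

void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		/* arch hook for additional resched conditions */
		if (need_resched() && arch_irqentry_exit_need_resched())
			preempt_schedule_irq();
	}
}

#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	/* Same role as the old arm64 need_irq_preemption() check */
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
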
arch/arm64/include/asm/preempt.h | 1 +
arch/arm64/kernel/entry-common.c | 28 +++++++++++++++++-----------
2 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..d0f93385bd85 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
void preempt_schedule(void);
void preempt_schedule_notrace(void);
+void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index efd1a990d138..80b47ca02db2 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -77,17 +77,10 @@ static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#define need_irq_preemption() \
- (static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
-#else
-#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
#endif
static inline bool arm64_need_resched(void)
{
- if (!need_irq_preemption())
- return false;
-
/*
* DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
* priority masking is used the GIC irqchip driver will clear DAIF.IF
@@ -111,6 +104,22 @@ static inline bool arm64_need_resched(void)
return true;
}
+void raw_irqentry_exit_cond_resched(void)
+{
+#ifdef CONFIG_PREEMPT_DYNAMIC
+ if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+ return;
+#else
+ if (!IS_ENABLED(CONFIG_PREEMPTION))
+ return;
+#endif
+
+ if (!preempt_count()) {
+ if (need_resched() && arm64_need_resched())
+ preempt_schedule_irq();
+ }
+}
+
/*
* Handle IRQ/context state management when exiting to kernel mode.
* After this function returns it is not safe to call regular kernel code,
@@ -133,10 +142,7 @@ static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs,
return;
}
- if (!preempt_count() && need_resched()) {
- if (arm64_need_resched())
- preempt_schedule_irq();
- }
+ raw_irqentry_exit_cond_resched();
trace_hardirqs_on();
} else {
--
2.34.1