When CONFIG_PREEMPT_DYNAMIC is enabled, check the dynamic-resched static key in a dedicated dynamic_irqentry_exit_cond_resched() wrapper rather than open-coding the check in raw_irqentry_exit_cond_resched(). irqentry_exit_cond_resched() then maps to the wrapper, or directly to raw_irqentry_exit_cond_resched() when CONFIG_PREEMPT_DYNAMIC is disabled, mirroring the structure of the generic entry code.
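For illustration, the resulting call paths (as wired up in
arch/arm64/include/asm/preempt.h by this patch) are roughly:

  CONFIG_PREEMPT_DYNAMIC=y:
    irqentry_exit_cond_resched()
      -> dynamic_irqentry_exit_cond_resched()  /* gated by sk_dynamic_irqentry_exit_cond_resched */
         -> raw_irqentry_exit_cond_resched()

  CONFIG_PREEMPT_DYNAMIC=n:
    irqentry_exit_cond_resched()
      -> raw_irqentry_exit_cond_resched()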
No functional changes.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
arch/arm64/include/asm/preempt.h | 3 +++
arch/arm64/kernel/entry-common.c | 21 +++++++++++----------
2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index d0f93385bd85..0f0ba250efe8 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -93,11 +93,14 @@ void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()

#else /* CONFIG_PREEMPT_DYNAMIC */

#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()

#endif /* CONFIG_PREEMPT_DYNAMIC */
#endif /* CONFIG_PREEMPTION */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3b110dcf4fa3..152216201f84 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -61,10 +61,6 @@ static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
return ret;
}

-#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#endif
-
static inline bool arm64_irqentry_exit_need_resched(void)
{
/*
@@ -92,17 +88,22 @@ static inline bool arm64_irqentry_exit_need_resched(void)

void raw_irqentry_exit_cond_resched(void)
{
-#ifdef CONFIG_PREEMPT_DYNAMIC
- if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
- return;
-#endif
-
if (!preempt_count()) {
if (need_resched() && arm64_irqentry_exit_need_resched())
preempt_schedule_irq();
}
}

+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+ return;
+ raw_irqentry_exit_cond_resched();
+}
+#endif
+
/*
* Handle IRQ/context state management when exiting to kernel mode.
* After this function returns it is not safe to call regular kernel code,
@@ -128,7 +129,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
}

if (IS_ENABLED(CONFIG_PREEMPTION))
- raw_irqentry_exit_cond_resched();
+ irqentry_exit_cond_resched();

trace_hardirqs_on();
} else {
--
2.34.1