Extract the arm64 resched logic into a new
raw_irqentry_exit_cond_resched() function, which makes the
code clearer when switching to the generic entry framework.
No functional changes.
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
---
arch/arm64/include/asm/preempt.h | 1 +
arch/arm64/kernel/entry-common.c | 17 ++++++++++-------
2 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..d0f93385bd85 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
void preempt_schedule(void);
void preempt_schedule_notrace(void);
+void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 58d660878c09..5b7df53cfcf6 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -95,6 +95,14 @@ static inline bool arm64_irqentry_exit_need_resched(void)
return true;
}
+void raw_irqentry_exit_cond_resched(void)
+{
+ if (!preempt_count()) {
+ if (need_resched() && arm64_irqentry_exit_need_resched())
+ preempt_schedule_irq();
+ }
+}
+
/*
* Handle IRQ/context state management when exiting to kernel mode.
* After this function returns it is not safe to call regular kernel code,
@@ -119,13 +127,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
return;
}
- if (IS_ENABLED(CONFIG_PREEMPTION)) {
- if (!preempt_count()) {
- if (need_resched() &&
- arm64_irqentry_exit_need_resched())
- preempt_schedule_irq();
- }
- }
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ raw_irqentry_exit_cond_resched();
trace_hardirqs_on();
} else {
--
2.34.1