[PATCH 2/2] arm64/entry: Remove arch_irqentry_exit_need_resched()

Mark Rutland posted 2 patches 2 weeks ago
[PATCH 2/2] arm64/entry: Remove arch_irqentry_exit_need_resched()
Posted by Mark Rutland 2 weeks ago
The only user of arch_irqentry_exit_need_resched() is arm64. As arm64
provides its own preemption logic, there's no need to indirect some of
this via the generic irq entry code.

Remove arch_irqentry_exit_need_resched(), and fold its logic directly
into arm64's entry code.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Ada Couprie Diaz <ada.coupriediaz@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jinjie Ruan <ruanjinjie@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vladimir Murzin <vladimir.murzin@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/entry-common.h | 27 ---------------------------
 arch/arm64/kernel/entry-common.c      | 27 ++++++++++++++++++++++++++-
 kernel/entry/common.c                 | 16 +---------------
 3 files changed, 27 insertions(+), 43 deletions(-)

diff --git a/arch/arm64/include/asm/entry-common.h b/arch/arm64/include/asm/entry-common.h
index cab8cd78f6938..2b8335ea2a390 100644
--- a/arch/arm64/include/asm/entry-common.h
+++ b/arch/arm64/include/asm/entry-common.h
@@ -27,31 +27,4 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 
 #define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
 
-static inline bool arch_irqentry_exit_need_resched(void)
-{
-	/*
-	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
-	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
-	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
-	 * DAIF we must have handled an NMI, so skip preemption.
-	 */
-	if (system_uses_irq_prio_masking() && read_sysreg(daif))
-		return false;
-
-	/*
-	 * Preempting a task from an IRQ means we leave copies of PSTATE
-	 * on the stack. cpufeature's enable calls may modify PSTATE, but
-	 * resuming one of these preempted tasks would undo those changes.
-	 *
-	 * Only allow a task to be preempted once cpufeatures have been
-	 * enabled.
-	 */
-	if (!system_capabilities_finalized())
-		return false;
-
-	return true;
-}
-
-#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
-
 #endif /* _ASM_ARM64_ENTRY_COMMON_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 1aedadf09eb4d..c4481e0e326a7 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -486,6 +486,31 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
 	irqentry_nmi_exit(regs, state);
 }
 
+static void arm64_irqentry_exit_cond_resched(void)
+{
+	/*
+	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
+	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+	 * DAIF we must have handled an NMI, so skip preemption.
+	 */
+	if (system_uses_irq_prio_masking() && read_sysreg(daif))
+		return;
+
+	/*
+	 * Preempting a task from an IRQ means we leave copies of PSTATE
+	 * on the stack. cpufeature's enable calls may modify PSTATE, but
+	 * resuming one of these preempted tasks would undo those changes.
+	 *
+	 * Only allow a task to be preempted once cpufeatures have been
+	 * enabled.
+	 */
+	if (!system_capabilities_finalized())
+		return;
+
+	irqentry_exit_cond_resched();
+}
+
 static __always_inline void __el1_irq(struct pt_regs *regs,
 				      void (*handler)(struct pt_regs *))
 {
@@ -497,7 +522,7 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
 	do_interrupt_handler(regs, handler);
 	irq_exit_rcu();
 
-	irqentry_exit_cond_resched();
+	arm64_irqentry_exit_cond_resched();
 
 	exit_to_kernel_mode(regs, state);
 }
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index af9cae1f225e3..28351d76cfeb3 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -171,20 +171,6 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
 	return ret;
 }
 
-/**
- * arch_irqentry_exit_need_resched - Architecture specific need resched function
- *
- * Invoked from raw_irqentry_exit_cond_resched() to check if resched is needed.
- * Defaults return true.
- *
- * The main purpose is to permit arch to avoid preemption of a task from an IRQ.
- */
-static inline bool arch_irqentry_exit_need_resched(void);
-
-#ifndef arch_irqentry_exit_need_resched
-static inline bool arch_irqentry_exit_need_resched(void) { return true; }
-#endif
-
 void raw_irqentry_exit_cond_resched(void)
 {
 	if (!preempt_count()) {
@@ -192,7 +178,7 @@ void raw_irqentry_exit_cond_resched(void)
 		rcu_irq_exit_check_preempt();
 		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
 			WARN_ON_ONCE(!on_thread_stack());
-		if (need_resched() && arch_irqentry_exit_need_resched())
+		if (need_resched())
 			preempt_schedule_irq();
 	}
 }
-- 
2.30.2