From: Peter Zijlstra <peterz@infradead.org>
Loongson re-enables interrupts in its idle routine and performs a
TIF_NEED_RESCHED check afterwards, before putting the CPU to sleep.
IRQs firing between the check and the idling instruction may set the
TIF_NEED_RESCHED flag. In order to deal with such a race, IRQs
interrupting __arch_cpu_idle() roll back their return address to the
beginning of __arch_cpu_idle() so that TIF_NEED_RESCHED is checked
again before going back to sleep.
However, idle IRQs can also queue timers that may require tick
reprogramming through a new generic idle loop iteration. Those timers
would go unnoticed here because __arch_cpu_idle() only checks
TIF_NEED_RESCHED; it doesn't check for pending timers.
Fix this by fast-forwarding the idle IRQs' return address to the end of
the idle routine instead of the beginning, so that the generic idle loop
handles both TIF_NEED_RESCHED and pending timers.
Fixes: 0603839b18f4 ("LoongArch: Add exception/interrupt handling")
Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
---
arch/loongarch/kernel/genex.S | 33 +++++++++++++++++++--------------
arch/loongarch/kernel/idle.c | 3 +--
arch/loongarch/kernel/reset.c | 23 ++++++++++++++---------
3 files changed, 34 insertions(+), 25 deletions(-)
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 86d5d90ebefe..ab2e9a262ee2 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -18,28 +18,33 @@
.align 5
SYM_FUNC_START(__arch_cpu_idle)
- /* start of rollback region */
- LONG_L t0, tp, TI_FLAGS
- nop
- andi t0, t0, _TIF_NEED_RESCHED
- bnez t0, 1f
- nop
- nop
- nop
+ /* start of idle interrupt region */
+ li.w t0, CSR_CRMD_IE
+ /* idle instruction needs irq enabled */
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
+ /*
+ * If an interrupt lands here, between enabling interrupts above and
+ * going idle on the next instruction, we must *NOT* go idle since the
+ * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
+ * reprogramming. Fall through -- see handle_vint() below -- and have
+ * the idle loop take care of things.
+ */
idle 0
- /* end of rollback region */
-1: jr ra
+ nop
+ /* end of idle interrupt region */
+SYM_INNER_LABEL(__arch_cpu_idle_exit, SYM_L_LOCAL)
+ jr ra
SYM_FUNC_END(__arch_cpu_idle)
SYM_CODE_START(handle_vint)
UNWIND_HINT_UNDEFINED
BACKUP_T0T1
SAVE_ALL
- la_abs t1, __arch_cpu_idle
+ la_abs t1, __arch_cpu_idle_exit
LONG_L t0, sp, PT_ERA
- /* 32 byte rollback region */
- ori t0, t0, 0x1f
- xori t0, t0, 0x1f
+ /* 16 byte idle interrupt region */
+ ori t0, t0, 0x0f
+ addi.d t0, t0, 1
bne t0, t1, 1f
LONG_S t0, sp, PT_ERA
1: move a0, sp
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
index 0b5dd2faeb90..54b247d8cdb6 100644
--- a/arch/loongarch/kernel/idle.c
+++ b/arch/loongarch/kernel/idle.c
@@ -11,7 +11,6 @@
void __cpuidle arch_cpu_idle(void)
{
- raw_local_irq_enable();
- __arch_cpu_idle(); /* idle instruction needs irq enabled */
+ __arch_cpu_idle();
raw_local_irq_disable();
}
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
index 1ef8c6383535..d78e8a1064a1 100644
--- a/arch/loongarch/kernel/reset.c
+++ b/arch/loongarch/kernel/reset.c
@@ -20,6 +20,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
+static __always_inline void native_halt(void)
+{
+ asm volatile("idle 0" : : : "memory");
+}
+
void machine_halt(void)
{
#ifdef CONFIG_SMP
@@ -32,9 +37,9 @@ void machine_halt(void)
pr_notice("\n\n** You can safely turn off the power now **\n\n");
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
- while (true) {
- __arch_cpu_idle();
- }
+ while (1) {
+ native_halt();
+ };
}
void machine_power_off(void)
@@ -52,9 +57,9 @@ void machine_power_off(void)
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
#endif
- while (true) {
- __arch_cpu_idle();
- }
+ while (1) {
+ native_halt();
+ };
}
void machine_restart(char *command)
@@ -73,7 +78,7 @@ void machine_restart(char *command)
if (!acpi_disabled)
acpi_reboot();
- while (true) {
- __arch_cpu_idle();
- }
+ while (1) {
+ native_halt();
+ };
}
--
2.48.1
On Thu, Feb 06, 2025 at 05:29:22PM +0100, Marco Crivellari wrote:
> From: Peter Zijlstra <peterz@infradead.org>
>
> [...]
> Not-yet-signed-off-by: Peter Zijlstra <peterz@infradead.org>
Feel free to make that:
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Hi, Marco,
Thank you for your patch, the logic is correct but the code can be improved.
On Fri, Feb 7, 2025 at 12:30 AM Marco Crivellari
<marco.crivellari@suse.com> wrote:
>
> [...]
>
> diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
> index 86d5d90ebefe..ab2e9a262ee2 100644
> --- a/arch/loongarch/kernel/genex.S
> +++ b/arch/loongarch/kernel/genex.S
> @@ -18,28 +18,33 @@
>
> .align 5
> SYM_FUNC_START(__arch_cpu_idle)
> - /* start of rollback region */
> - LONG_L t0, tp, TI_FLAGS
> - nop
> - andi t0, t0, _TIF_NEED_RESCHED
> - bnez t0, 1f
> - nop
> - nop
> - nop
> + /* start of idle interrupt region */
> + li.w t0, CSR_CRMD_IE
It is better to replace it with "ori t0, zero, CSR_CRMD_IE", which is
guaranteed to assemble to a single instruction (li.w is a
pseudo-instruction that may expand to more than one).
> + /* idle instruction needs irq enabled */
> + csrxchg t0, t0, LOONGARCH_CSR_CRMD
> + /*
> + * If an interrupt lands here, between enabling interrupts above and
> + * going idle on the next instruction, we must *NOT* go idle since the
> + * interrupt could have set TIF_NEED_RESCHED or caused a timer to need
> + * reprogramming. Fall through -- see handle_vint() below -- and have
> + * the idle loop take care of things.
> + */
> idle 0
> - /* end of rollback region */
> -1: jr ra
> + nop
Remove the "nop", of course it should be together with the later modification.
> + /* end of idle interrupt region */
> +SYM_INNER_LABEL(__arch_cpu_idle_exit, SYM_L_LOCAL)
> + jr ra
> SYM_FUNC_END(__arch_cpu_idle)
>
> SYM_CODE_START(handle_vint)
> UNWIND_HINT_UNDEFINED
> BACKUP_T0T1
> SAVE_ALL
> - la_abs t1, __arch_cpu_idle
> + la_abs t1, __arch_cpu_idle_exit
> LONG_L t0, sp, PT_ERA
> - /* 32 byte rollback region */
> - ori t0, t0, 0x1f
> - xori t0, t0, 0x1f
> + /* 16 byte idle interrupt region */
> + ori t0, t0, 0x0f
> + addi.d t0, t0, 1
Replace these two instructions with a single "ori t0, t0, 0xc", and the
"16 byte" comment can then be changed to "3 instructions".
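A quick sketch of why the single "ori" works (illustrative only,
assuming the region shrinks to 3 instructions with a 16-byte-aligned
base): the interrupted ERA can only be base+0, base+4 or base+8, and
OR-ing in 0xc maps all three to base+0xc, i.e. __arch_cpu_idle_exit:

	/* Illustrative check; "base" is a hypothetical aligned address. */
	#include <assert.h>

	int main(void)
	{
		unsigned long base = 0x1000;	/* low 4 bits clear */

		for (int off = 0; off <= 8; off += 4)
			assert(((base + off) | 0xc) == base + 0xc);
		return 0;
	}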
> bne t0, t1, 1f
> LONG_S t0, sp, PT_ERA
> 1: move a0, sp
> [...]
> diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
> index 1ef8c6383535..d78e8a1064a1 100644
> --- a/arch/loongarch/kernel/reset.c
> +++ b/arch/loongarch/kernel/reset.c
> @@ -20,6 +20,11 @@
> void (*pm_power_off)(void);
> EXPORT_SYMBOL(pm_power_off);
>
> +static __always_inline void native_halt(void)
> +{
> + asm volatile("idle 0" : : : "memory");
> +}
Just use the inline assembly at the call sites, please.
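For illustration, the open-coded call sites could then look like this
(a sketch reusing the asm from native_halt() above, not the final
code):

	while (1)
		__asm__ __volatile__("idle 0" : : : "memory");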
Huacai
Hi,
Thanks for your replies!
I will make a new version with the aforementioned changes.
Marco
On Fri, Feb 7, 2025 at 9:18 AM Huacai Chen <chenhuacai@kernel.org> wrote:
>
> [...]
Yes, it is better to skip out of the inner loop and let the idle
framework handle it.
Tested-by: Bibo Mao <maobibo@loongson.cn>
On 2025/2/7 12:29 AM, Marco Crivellari wrote:
> [...]