TCR2_ELx.E0POE is set during smp_init().
However, this bit is not reprogrammed when the CPU enters suspension and
later resumes via cpu_resume(), as __cpu_setup() does not re-enable E0POE
and there is no save/restore logic for the TCR2_ELx system register.
As a result, the E0POE feature no longer works after cpu_resume().
To address this, save and restore TCR2_EL1 in the cpu_suspend()/cpu_resume()
path, rather than adding related logic to __cpu_setup(), taking into account
possible future extensions of the TCR2_ELx feature.
Fixes: bf83dae90fbc ("arm64: enable the Permission Overlay Extension for EL0")
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
NOTE:
This patch is based on v6.19-rc4
---
arch/arm64/include/asm/suspend.h | 2 +-
arch/arm64/mm/proc.S | 22 ++++++++++++++--------
2 files changed, 15 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e65f33edf9d6..e9ce68d50ba4 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 13
+#define NR_CTX_REGS 14
#define NR_CALLEE_SAVED_REGS 12
/*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 01e868116448..3888f2ca43fb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -97,8 +97,11 @@ SYM_FUNC_START(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
- get_this_cpu_offset x12
- mrs x13, sp_el0
+alternative_if ARM64_HAS_TCR2
+ mrs x12, REG_TCR2_EL1
+alternative_else_nop_endif
+ get_this_cpu_offset x13
+ mrs x14, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
@@ -109,7 +112,7 @@ SYM_FUNC_START(cpu_do_suspend)
* Save x18 as it may be used as a platform register, e.g. by shadow
* call stack.
*/
- str x18, [x0, #96]
+ stp x14, x18, [x0, #96]
ret
SYM_FUNC_END(cpu_do_suspend)
@@ -130,8 +133,8 @@ SYM_FUNC_START(cpu_do_resume)
* the buffer to minimize the risk of exposure when used for shadow
* call stack.
*/
- ldr x18, [x0, #96]
- str xzr, [x0, #96]
+ ldp x15, x18, [x0, #96]
+ str xzr, [x0, #104]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -144,10 +147,13 @@ SYM_FUNC_START(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
msr mdscr_el1, x10
+alternative_if ARM64_HAS_TCR2
+ msr REG_TCR2_EL1, x13
+alternative_else_nop_endif
msr sctlr_el1, x12
- set_this_cpu_offset x13
- msr sp_el0, x14
+ set_this_cpu_offset x14
+ msr sp_el0, x15
/*
* Restore oslsr_el1 by writing oslar_el1
*/
@@ -161,7 +167,7 @@ alternative_if ARM64_HAS_RAS_EXTN
msr_s SYS_DISR_EL1, xzr
alternative_else_nop_endif
- ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
+ ptrauth_keys_install_kernel_nosync x15, x1, x2, x3
isb
ret
SYM_FUNC_END(cpu_do_resume)
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
On 05/01/2026 21:07, Yeoreum Yun wrote:
> TCR2_ELx.E0POE is set during smp_init().
> However, this bit is not reprogrammed when the CPU enters suspension and
> later resumes via cpu_resume(), as __cpu_setup() does not re-enable E0POE
> and there is no save/restore logic for the TCR2_ELx system register.
>
> As a result, the E0POE feature no longer works after cpu_resume().
>
> To address this, save and restore TCR2_EL1 in the cpu_suspend()/cpu_resume()
> path, rather than adding related logic to __cpu_setup(), taking into account
> possible future extensions of the TCR2_ELx feature.
This is a very good catch!
> Fixes: bf83dae90fbc ("arm64: enable the Permission Overlay Extension for EL0")
We should also Cc: stable@vger.kernel.org as this should be backported
to stable kernels that support POE.
> Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
> ---
> NOTE:
> This patch is based on v6.19-rc4
> ---
> arch/arm64/include/asm/suspend.h | 2 +-
> arch/arm64/mm/proc.S | 22 ++++++++++++++--------
> 2 files changed, 15 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
> index e65f33edf9d6..e9ce68d50ba4 100644
> --- a/arch/arm64/include/asm/suspend.h
> +++ b/arch/arm64/include/asm/suspend.h
> @@ -2,7 +2,7 @@
> #ifndef __ASM_SUSPEND_H
> #define __ASM_SUSPEND_H
>
> -#define NR_CTX_REGS 13
> +#define NR_CTX_REGS 14
> #define NR_CALLEE_SAVED_REGS 12
>
> /*
> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
> index 01e868116448..3888f2ca43fb 100644
> --- a/arch/arm64/mm/proc.S
> +++ b/arch/arm64/mm/proc.S
> @@ -97,8 +97,11 @@ SYM_FUNC_START(cpu_do_suspend)
> mrs x9, mdscr_el1
> mrs x10, oslsr_el1
> mrs x11, sctlr_el1
> - get_this_cpu_offset x12
> - mrs x13, sp_el0
> +alternative_if ARM64_HAS_TCR2
> + mrs x12, REG_TCR2_EL1
> +alternative_else_nop_endif
> + get_this_cpu_offset x13
> + mrs x14, sp_el0
> stp x2, x3, [x0]
> stp x4, x5, [x0, #16]
> stp x6, x7, [x0, #32]
> @@ -109,7 +112,7 @@ SYM_FUNC_START(cpu_do_suspend)
> * Save x18 as it may be used as a platform register, e.g. by shadow
> * call stack.
> */
> - str x18, [x0, #96]
> + stp x14, x18, [x0, #96]
If TCR2_EL1 isn't supported, we store and reload an unused arbitrary
value. I think it'd be better to make it all conditional and add it at
the end, something like:
    alternative_if ARM64_HAS_TCR2
        mrs     x2, REG_TCR2_EL1
        str     x2, [x0, #104]
    alternative_else_nop_endif
Same idea on the resume path. This also avoids the noise of renaming
existing registers.
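(Presumably the resume-path counterpart would just be the mirror image of the
sketch above -- untested, and assuming the new value really does live in a slot
at offset #104 of the context buffer, with x0 still pointing at that buffer and
x2 free as a scratch register at the point where this is placed:)

    alternative_if ARM64_HAS_TCR2
        ldr     x2, [x0, #104]
        msr     REG_TCR2_EL1, x2
    alternative_else_nop_endif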
- Kevin
> ret
> SYM_FUNC_END(cpu_do_suspend)
>
> @@ -130,8 +133,8 @@ SYM_FUNC_START(cpu_do_resume)
> * the buffer to minimize the risk of exposure when used for shadow
> * call stack.
> */
> - ldr x18, [x0, #96]
> - str xzr, [x0, #96]
> + ldp x15, x18, [x0, #96]
> + str xzr, [x0, #104]
> msr tpidr_el0, x2
> msr tpidrro_el0, x3
> msr contextidr_el1, x4
> @@ -144,10 +147,13 @@ SYM_FUNC_START(cpu_do_resume)
> msr tcr_el1, x8
> msr vbar_el1, x9
> msr mdscr_el1, x10
> +alternative_if ARM64_HAS_TCR2
> + msr REG_TCR2_EL1, x13
> +alternative_else_nop_endif
>
> msr sctlr_el1, x12
> - set_this_cpu_offset x13
> - msr sp_el0, x14
> + set_this_cpu_offset x14
> + msr sp_el0, x15
> /*
> * Restore oslsr_el1 by writing oslar_el1
> */
> @@ -161,7 +167,7 @@ alternative_if ARM64_HAS_RAS_EXTN
> msr_s SYS_DISR_EL1, xzr
> alternative_else_nop_endif
>
> - ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
> + ptrauth_keys_install_kernel_nosync x15, x1, x2, x3
> isb
> ret
> SYM_FUNC_END(cpu_do_resume)
> --
> LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
>
Hi Kevin,
> On 05/01/2026 21:07, Yeoreum Yun wrote:
> > TCR2_ELx.E0POE is set during smp_init().
> > However, this bit is not reprogrammed when the CPU enters suspension and
> > later resumes via cpu_resume(), as __cpu_setup() does not re-enable E0POE
> > and there is no save/restore logic for the TCR2_ELx system register.
> >
> > As a result, the E0POE feature no longer works after cpu_resume().
> >
> > To address this, save and restore TCR2_EL1 in the cpu_suspend()/cpu_resume()
> > path, rather than adding related logic to __cpu_setup(), taking into account
> > possible future extensions of the TCR2_ELx feature.
>
> This is a very good catch!
>
> > Fixes: bf83dae90fbc ("arm64: enable the Permission Overlay Extension for EL0")
>
> We should also Cc: stable@vger.kernel.org as this should be backported
> to stable kernels that support POE.
>
Okay. I'll add it in the next version.
[...]
> > @@ -97,8 +97,11 @@ SYM_FUNC_START(cpu_do_suspend)
> > mrs x9, mdscr_el1
> > mrs x10, oslsr_el1
> > mrs x11, sctlr_el1
> > - get_this_cpu_offset x12
> > - mrs x13, sp_el0
> > +alternative_if ARM64_HAS_TCR2
> > + mrs x12, REG_TCR2_EL1
> > +alternative_else_nop_endif
> > + get_this_cpu_offset x13
> > + mrs x14, sp_el0
> > stp x2, x3, [x0]
> > stp x4, x5, [x0, #16]
> > stp x6, x7, [x0, #32]
> > @@ -109,7 +112,7 @@ SYM_FUNC_START(cpu_do_suspend)
> > * Save x18 as it may be used as a platform register, e.g. by shadow
> > * call stack.
> > */
> > - str x18, [x0, #96]
> > + stp x14, x18, [x0, #96]
>
> If TCR2_EL1 isn't supported, we store and reload an unused arbitrary
> value. I think it'd be better to make it all conditional and add it at
> the end, something like:
>
> >     alternative_if ARM64_HAS_TCR2
> >         mrs     x2, REG_TCR2_EL1
> >         str     x2, [x0, #104]
> >     alternative_else_nop_endif
>
> Same idea on the resume path. This also avoids the noise of renaming
> existing registers.
IMHO, I think it would be better to keep the change as it is, since
it seems simpler to maintain, and x12 is a temporary register,
so leaking whatever was in x12 does not really feel like a concern...
Am I missing something?
Thanks!
--
Sincerely,
Yeoreum Yun
On 07/01/2026 10:57, Yeoreum Yun wrote:
>>> @@ -97,8 +97,11 @@ SYM_FUNC_START(cpu_do_suspend)
>>> mrs x9, mdscr_el1
>>> mrs x10, oslsr_el1
>>> mrs x11, sctlr_el1
>>> - get_this_cpu_offset x12
>>> - mrs x13, sp_el0
>>> +alternative_if ARM64_HAS_TCR2
>>> + mrs x12, REG_TCR2_EL1
>>> +alternative_else_nop_endif
>>> + get_this_cpu_offset x13
>>> + mrs x14, sp_el0
>>> stp x2, x3, [x0]
>>> stp x4, x5, [x0, #16]
>>> stp x6, x7, [x0, #32]
>>> @@ -109,7 +112,7 @@ SYM_FUNC_START(cpu_do_suspend)
>>> * Save x18 as it may be used as a platform register, e.g. by shadow
>>> * call stack.
>>> */
>>> - str x18, [x0, #96]
>>> + stp x14, x18, [x0, #96]
>> If TCR2_EL1 isn't supported, we store and reload an unused arbitrary
>> value. I think it'd be better to make it all conditional and add it at
>> the end, something like:
>>
>>     alternative_if ARM64_HAS_TCR2
>>         mrs     x2, REG_TCR2_EL1
>>         str     x2, [x0, #104]
>>     alternative_else_nop_endif
>>
>> Same idea on the resume path. This also avoids the noise of renaming
>> existing registers.
> IMHO, I think it would be better to keep the change as it is, since
> it seems simpler to maintain, and x12 is a temporary register,
> so leaking whatever was in x12 does not really feel like a concern...

Leaking is not a concern, but I don't think it's really easier to
maintain. We can have all the conditional registers grouped together,
like DISR_EL1 and soon SCTLR2_EL1. This avoids renaming a bunch of
registers every time we save/restore a new register here.

- Kevin
Hi Kevin,
> On 07/01/2026 10:57, Yeoreum Yun wrote:
> >>> @@ -97,8 +97,11 @@ SYM_FUNC_START(cpu_do_suspend)
> >>> mrs x9, mdscr_el1
> >>> mrs x10, oslsr_el1
> >>> mrs x11, sctlr_el1
> >>> - get_this_cpu_offset x12
> >>> - mrs x13, sp_el0
> >>> +alternative_if ARM64_HAS_TCR2
> >>> + mrs x12, REG_TCR2_EL1
> >>> +alternative_else_nop_endif
> >>> + get_this_cpu_offset x13
> >>> + mrs x14, sp_el0
> >>> stp x2, x3, [x0]
> >>> stp x4, x5, [x0, #16]
> >>> stp x6, x7, [x0, #32]
> >>> @@ -109,7 +112,7 @@ SYM_FUNC_START(cpu_do_suspend)
> >>> * Save x18 as it may be used as a platform register, e.g. by shadow
> >>> * call stack.
> >>> */
> >>> - str x18, [x0, #96]
> >>> + stp x14, x18, [x0, #96]
> >> If TCR2_EL1 isn't supported, we store and reload an unused arbitrary
> >> value. I think it'd be better to make it all conditional and add it at
> >> the end, something like:
> >>
> >>     alternative_if ARM64_HAS_TCR2
> >>         mrs     x2, REG_TCR2_EL1
> >>         str     x2, [x0, #104]
> >>     alternative_else_nop_endif
> >>
> >> Same idea on the resume path. This also avoids the noise of renaming
> >> existing registers.
> > IMHO, I think it would be better to keep the change as it is, since
> > it seems simpler to maintain, and x12 is a temporary register,
> > so leaking whatever was in x12 does not really feel like a concern...
>
> Leaking is not a concern, but I don't think it's really easier to
> maintain. We can have all the conditional registers grouped together,
> like DISR_EL1 and soon SCTLR2_EL1. This avoids renaming a bunch of
> registers every time we save/restore a new register here.

Oh. I overlooked that point. I'll follow your suggestion.

Thanks!

--
Sincerely,
Yeoreum Yun