The SCTLR2_EL1 register is optional from ARMv8.8/ARMv9.3 and becomes
mandatory from ARMv8.9/ARMv9.4. Several architectural features are
controlled by bits in this register (e.g. FEAT_PAuth_LR and
FEAT_CPA/CPA2).

Save and restore SCTLR2_EL1 in cpu_suspend() and resume() so that the
configured value is preserved across the suspend/resume cycle.

Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/include/asm/suspend.h | 2 +-
arch/arm64/mm/proc.S | 26 ++++++++++++++++++--------
2 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 0cde2f473971..eb60c9735553 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -2,7 +2,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 13
+#define NR_CTX_REGS 14
 #define NR_CALLEE_SAVED_REGS 12
 
 /*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8c75965afc9e..f297bea7103b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -87,8 +87,14 @@ SYM_FUNC_START(cpu_do_suspend)
 	mrs	x9, mdscr_el1
 	mrs	x10, oslsr_el1
 	mrs	x11, sctlr_el1
-	get_this_cpu_offset x12
-	mrs	x13, sp_el0
+alternative_if_not ARM64_HAS_SCTLR2
+	mov	x12, xzr
+alternative_else
+	mrs_s	x12, SYS_SCTLR2_EL1
+alternative_endif
+	get_this_cpu_offset x13
+	mrs	x14, sp_el0
+
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
 	stp	x6, x7, [x0, #32]
@@ -99,7 +105,7 @@ SYM_FUNC_START(cpu_do_suspend)
 	 * Save x18 as it may be used as a platform register, e.g. by shadow
 	 * call stack.
 	 */
-	str	x18, [x0, #96]
+	stp	x14, x18, [x0, #96]
 	ret
 SYM_FUNC_END(cpu_do_suspend)
 
@@ -120,8 +126,8 @@ SYM_FUNC_START(cpu_do_resume)
 	 * the buffer to minimize the risk of exposure when used for shadow
 	 * call stack.
 	 */
-	ldr	x18, [x0, #96]
-	str	xzr, [x0, #96]
+	ldp	x15, x18, [x0, #96]
+	str	xzr, [x0, #104]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -136,8 +142,12 @@ SYM_FUNC_START(cpu_do_resume)
 	msr	mdscr_el1, x10
 
 	msr	sctlr_el1, x12
-	set_this_cpu_offset x13
-	msr	sp_el0, x14
+alternative_if ARM64_HAS_SCTLR2
+	msr_s	SYS_SCTLR2_EL1, x13
+alternative_else_nop_endif
+
+	set_this_cpu_offset x14
+	msr	sp_el0, x15
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
@@ -151,7 +161,7 @@ alternative_if ARM64_HAS_RAS_EXTN
 	msr_s	SYS_DISR_EL1, xzr
 alternative_else_nop_endif
 
-	ptrauth_keys_install_kernel_nosync x14, x1, x2, x3
+	ptrauth_keys_install_kernel_nosync x15, x1, x2, x3
 	isb
 	ret
 SYM_FUNC_END(cpu_do_resume)
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
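
For reference, the saved-context layout implied by this patch (the slot
assignments for x2-x13 are inferred from the unchanged earlier part of
cpu_do_suspend in mainline; treat this as a sketch, not text quoted from
the patch):

	/*
	 * cpu_suspend() context buffer after this patch (NR_CTX_REGS = 14):
	 *
	 *   [x0, #0]    tpidr_el0        (x2)
	 *   [x0, #8]    tpidrro_el0      (x3)
	 *   ...                          (x4-x10: contextidr_el1 ... oslsr_el1)
	 *   [x0, #72]   sctlr_el1        (x11)
	 *   [x0, #80]   SCTLR2_EL1       (x12; zero when !ARM64_HAS_SCTLR2)
	 *   [x0, #88]   this-CPU offset  (x13)
	 *   [x0, #96]   sp_el0           (x14)
	 *   [x0, #104]  x18              (platform register / shadow call stack)
	 */

This also shows why cpu_do_resume now zeroes only [x0, #104]: x18 is the
one slot whose stale contents matter for the shadow call stack, while the
remaining slots hold ordinary system-register state.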
Hi,

On Wed, Aug 13, 2025 at 01:01:16PM +0100, Yeoreum Yun wrote:

[...]

> @@ -87,8 +87,14 @@ SYM_FUNC_START(cpu_do_suspend)
>  	mrs	x9, mdscr_el1
>  	mrs	x10, oslsr_el1
>  	mrs	x11, sctlr_el1
> -	get_this_cpu_offset x12
> -	mrs	x13, sp_el0
> +alternative_if_not ARM64_HAS_SCTLR2
> +	mov	x12, xzr

Looking at this, maybe it can just be a nop for the !ARM64_HAS_SCTLR2
case.

(So, alternative_if ... alternative_else_nop_endif, similarly to what
you have in cpu_do_resume.)

The memory used to save this state should not be accessible to anything
less privileged than the kernel anyway, so leaking whatever was in x12
does not really feel like a concern...

> +alternative_else
> +	mrs_s	x12, SYS_SCTLR2_EL1
> +alternative_endif

[...]

Otherwise, this looks OK to me.

Cheers
---Dave
Hi Dave,

> On Wed, Aug 13, 2025 at 01:01:16PM +0100, Yeoreum Yun wrote:

[...]

> > +alternative_if_not ARM64_HAS_SCTLR2
> > +	mov	x12, xzr
>
> Looking at this, maybe it can just be a nop for the !ARM64_HAS_SCTLR2
> case.
>
> (So, alternative_if ... alternative_else_nop_endif, similarly to what
> you have in cpu_do_resume.)
>
> The memory used to save this state should not be accessible to anything
> less privileged than the kernel anyway, so leaking whatever was in x12
> does not really feel like a concern...

Right. I'll change this. Thanks!

[...]

> Otherwise, this looks OK to me.
>
> Cheers
> ---Dave

--
Sincerely,
Yeoreum Yun
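
Following the exchange above, the suspend-path hunk in a respin would
presumably end up looking something like the sketch below (this adopts
Dave's alternative_if/alternative_else_nop_endif suggestion and simply
leaves x12 unsanitised when FEAT_SCTLR2 is absent; it is an illustration,
not the posted v2):

alternative_if ARM64_HAS_SCTLR2
	mrs_s	x12, SYS_SCTLR2_EL1		// save SCTLR2_EL1 into the x12 slot
alternative_else_nop_endif
	get_this_cpu_offset x13
	mrs	x14, sp_el0

On CPUs without FEAT_SCTLR2 the mrs_s is patched to a NOP, so whatever
value happens to be in x12 gets stored at [x0, #80]; as noted above, the
suspend context buffer is only accessible to the kernel, so this is
harmless, and the matching SCTLR2_EL1 write in cpu_do_resume is already
skipped under the same alternative.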