The SCTLR2_EL1 register is optional starting from ARMv8.8/ARMv9.3,
and becomes mandatory from ARMv8.9/ARMv9.4.
Several architectural features are controlled by bits in
this register, and some of these bits could be configurable per task
rather than globally -- e.g. FEAT_CPA2-related fields.
For future usage of these fields, make SCTLR2_EL1 per-task.
Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
---
arch/arm64/include/asm/processor.h | 5 +++++
arch/arm64/kernel/process.c | 9 +++++++++
2 files changed, 14 insertions(+)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 61d62bfd5a7b..2c962816de70 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -184,6 +184,7 @@ struct thread_struct {
u64 mte_ctrl;
#endif
u64 sctlr_user;
+ u64 sctlr2_user;
u64 svcr;
u64 tpidr2_el0;
u64 por_el0;
@@ -258,6 +259,9 @@ static inline void task_set_sve_vl_onexec(struct task_struct *task,
(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
SCTLR_EL1_TCF0_MASK)
+#define SCTLR2_USER_MASK \
+ (SCTLR2_EL1_EnPACM0 | SCTLR2_EL1_CPTA0 | SCTLR2_EL1_CPTM0)
+
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
@@ -370,6 +374,7 @@ struct task_struct;
unsigned long __get_wchan(struct task_struct *p);
void update_sctlr_el1(u64 sctlr);
+void update_sctlr2_el1(u64 sctlr2);
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 96482a1412c6..9191180c4875 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -698,6 +698,11 @@ void update_sctlr_el1(u64 sctlr)
isb();
}
+void update_sctlr2_el1(u64 sctlr2)
+{
+ sysreg_clear_set_s(SYS_SCTLR2_EL1, SCTLR2_USER_MASK, sctlr2);
+}
+
/*
* Thread switching.
*/
@@ -737,6 +742,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (prev->thread.sctlr_user != next->thread.sctlr_user)
update_sctlr_el1(next->thread.sctlr_user);
+ if (alternative_has_cap_unlikely(ARM64_HAS_SCTLR2) &&
+ prev->thread.sctlr2_user != next->thread.sctlr2_user)
+ update_sctlr2_el1(next->thread.sctlr2_user);
+
/* the actual thread switch */
last = cpu_switch_to(prev, next);
--
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}
Hi, On Wed, Aug 13, 2025 at 01:01:18PM +0100, Yeoreum Yun wrote: > SCTLR2_EL1 register is optional starting from ARMv8.8/ARMv9.3, > and becomes mandatory from ARMv8.9/ARMv9.4 > and serveral architectural feature are controled by bits in > these registers and some of bits could be configurable per task > not globally -- i.e) FEAT_CPA2 related field and etc. > > For future usage of these fields, make the per-task SCTLR2_EL1. It is worth pointing out the impact of this: for platforms without FEAT_SCTLR2 support, there is no functional change and minimal performance overhead. > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com> > --- > arch/arm64/include/asm/processor.h | 5 +++++ > arch/arm64/kernel/process.c | 9 +++++++++ > 2 files changed, 14 insertions(+) > > diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h > index 61d62bfd5a7b..2c962816de70 100644 > --- a/arch/arm64/include/asm/processor.h > +++ b/arch/arm64/include/asm/processor.h > @@ -184,6 +184,7 @@ struct thread_struct { > u64 mte_ctrl; > #endif > u64 sctlr_user; > + u64 sctlr2_user; > u64 svcr; > u64 tpidr2_el0; > u64 por_el0; > @@ -258,6 +259,9 @@ static inline void task_set_sve_vl_onexec(struct task_struct *task, > (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \ > SCTLR_EL1_TCF0_MASK) > > +#define SCTLR2_USER_MASK \ > + (SCTLR2_EL1_EnPACM0 | SCTLR2_EL1_CPTA0 | SCTLR2_EL1_CPTM0) > + The kernel doesn't know about any of these features, yet. It's probably better to make this 0 for this patch series, and add bits to this mask only when they are needed / used. > static inline void arch_thread_struct_whitelist(unsigned long *offset, > unsigned long *size) > { > @@ -370,6 +374,7 @@ struct task_struct; > unsigned long __get_wchan(struct task_struct *p); > > void update_sctlr_el1(u64 sctlr); > +void update_sctlr2_el1(u64 sctlr2); Is this function used outside process.c yet? If not, you can drop this declaration and [... below ...] 
> > /* Thread switching */ > extern struct task_struct *cpu_switch_to(struct task_struct *prev, > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c > index 96482a1412c6..9191180c4875 100644 > --- a/arch/arm64/kernel/process.c > +++ b/arch/arm64/kernel/process.c > @@ -698,6 +698,11 @@ void update_sctlr_el1(u64 sctlr) > isb(); > } > > +void update_sctlr2_el1(u64 sctlr2) [...] make the function static here. > +{ > + sysreg_clear_set_s(SYS_SCTLR2_EL1, SCTLR2_USER_MASK, sctlr2); > +} > + > /* > * Thread switching. > */ > @@ -737,6 +742,10 @@ struct task_struct *__switch_to(struct task_struct *prev, > if (prev->thread.sctlr_user != next->thread.sctlr_user) > update_sctlr_el1(next->thread.sctlr_user); > > + if (alternative_has_cap_unlikely(ARM64_HAS_SCTLR2) && > + prev->thread.sctlr2_user != next->thread.sctlr2_user) > + update_sctlr2_el1(next->thread.sctlr2_user); > + > /* the actual thread switch */ > last = cpu_switch_to(prev, next); [...] Otherwise, I guess this looks OK. Cheers ---Dave
Hi Dave, > On Wed, Aug 13, 2025 at 01:01:18PM +0100, Yeoreum Yun wrote: > > SCTLR2_EL1 register is optional starting from ARMv8.8/ARMv9.3, > > and becomes mandatory from ARMv8.9/ARMv9.4 > > and serveral architectural feature are controled by bits in > > these registers and some of bits could be configurable per task > > not globally -- i.e) FEAT_CPA2 related field and etc. > > > > For future usage of these fields, make the per-task SCTLR2_EL1. > > It is worth pointing out the impact of this: for platforms without > FEAT_SCTLR2 support, there is no functional change and minimal > performance overhead. Thanks for your suggestion :) > > > Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com> > > --- > > arch/arm64/include/asm/processor.h | 5 +++++ > > arch/arm64/kernel/process.c | 9 +++++++++ > > 2 files changed, 14 insertions(+) > > > > diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h > > index 61d62bfd5a7b..2c962816de70 100644 > > --- a/arch/arm64/include/asm/processor.h > > +++ b/arch/arm64/include/asm/processor.h > > @@ -184,6 +184,7 @@ struct thread_struct { > > u64 mte_ctrl; > > #endif > > u64 sctlr_user; > > + u64 sctlr2_user; > > u64 svcr; > > u64 tpidr2_el0; > > u64 por_el0; > > @@ -258,6 +259,9 @@ static inline void task_set_sve_vl_onexec(struct task_struct *task, > > (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \ > > SCTLR_EL1_TCF0_MASK) > > > > +#define SCTLR2_USER_MASK \ > > + (SCTLR2_EL1_EnPACM0 | SCTLR2_EL1_CPTA0 | SCTLR2_EL1_CPTM0) > > + > > The kernel doesn't know about any of these features, yet. > > It's probably better to make this 0 for this patch series, and add bits > to this mask only when they are needed / used. I see. Thanks! 
> > > static inline void arch_thread_struct_whitelist(unsigned long *offset, > > unsigned long *size) > > { > > @@ -370,6 +374,7 @@ struct task_struct; > > unsigned long __get_wchan(struct task_struct *p); > > > > void update_sctlr_el1(u64 sctlr); > > +void update_sctlr2_el1(u64 sctlr2); > > Is this function used outside process.c yet? If not, you can drop this > declaration and [... below ...] > > > > /* Thread switching */ > > extern struct task_struct *cpu_switch_to(struct task_struct *prev, > > diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c > > index 96482a1412c6..9191180c4875 100644 > > --- a/arch/arm64/kernel/process.c > > +++ b/arch/arm64/kernel/process.c > > @@ -698,6 +698,11 @@ void update_sctlr_el1(u64 sctlr) > > isb(); > > } > > > > +void update_sctlr2_el1(u64 sctlr2) > > [...] make the function static here. You're right. there's no usage anyware except here. I'll change this. Thanks! > > > > +{ > > + sysreg_clear_set_s(SYS_SCTLR2_EL1, SCTLR2_USER_MASK, sctlr2); > > +} > > + > > /* > > * Thread switching. > > */ > > @@ -737,6 +742,10 @@ struct task_struct *__switch_to(struct task_struct *prev, > > if (prev->thread.sctlr_user != next->thread.sctlr_user) > > update_sctlr_el1(next->thread.sctlr_user); > > > > + if (alternative_has_cap_unlikely(ARM64_HAS_SCTLR2) && > > + prev->thread.sctlr2_user != next->thread.sctlr2_user) > > + update_sctlr2_el1(next->thread.sctlr2_user); > > + > > /* the actual thread switch */ > > last = cpu_switch_to(prev, next); > > [...] > > Otherwise, I guess this looks OK. > > Cheers > ---Dave -- Sincerely, Yeoreum Yun
© 2016 - 2025 Red Hat, Inc.