The naming is inconsistent. Rename the srso_*() helpers to srso_fam17_*()
to state the microarchitecture they apply to, and to mirror the
srso_fam19_*() change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: x86@kernel.org
CC: linux-kernel@vger.kernel.org
CC: Borislav Petkov <bp@alien8.de>
CC: Peter Zijlstra <peterz@infradead.org>
CC: Josh Poimboeuf <jpoimboe@kernel.org>
CC: Babu Moger <babu.moger@amd.com>
CC: David.Kaplan@amd.com
CC: Nikolay Borisov <nik.borisov@suse.com>
CC: gregkh@linuxfoundation.org
CC: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/nospec-branch.h |  4 ++--
 arch/x86/kernel/cpu/bugs.c           |  2 +-
 arch/x86/kernel/vmlinux.lds.S        |  2 +-
 arch/x86/lib/retpoline.S             | 32 ++++++++++++++--------------
4 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 93e8de0bf94e..a4c686bc4b1f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -349,11 +349,11 @@ static inline void __x86_return_thunk(void) {}
 #endif
 
 extern void retbleed_return_thunk(void);
-extern void srso_return_thunk(void);
+extern void srso_fam17_return_thunk(void);
 extern void srso_fam19_return_thunk(void);
 
 extern void retbleed_untrain_ret(void);
-extern void srso_untrain_ret(void);
+extern void srso_fam17_untrain_ret(void);
 extern void srso_fam19_untrain_ret(void);
 
 extern void entry_untrain_ret(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92bec0d719ce..893d14a9f282 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2467,7 +2467,7 @@ static void __init srso_select_mitigation(void)
 				x86_return_thunk = srso_fam19_return_thunk;
 			} else {
 				setup_force_cpu_cap(X86_FEATURE_SRSO);
-				x86_return_thunk = srso_return_thunk;
+				x86_return_thunk = srso_fam17_return_thunk;
 			}
 			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 		} else {
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index c9b6f8b83187..127ccdbf6d95 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -522,7 +522,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 
 #ifdef CONFIG_RETHUNK
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+. = ASSERT((srso_fam17_safe_ret & 0x3f) == 0, "srso_fam17_safe_ret not cacheline-aligned");
 #endif
 
 #ifdef CONFIG_CPU_SRSO
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 772757ea26a7..d8732ae21122 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -194,13 +194,13 @@ SYM_CODE_END(srso_fam19_return_thunk)
  *
  * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
  * Retbleed sequence because the return sequence done there
- * (srso_safe_ret()) is longer and the return sequence must fully nest
+ * (srso_fam17_safe_ret()) is longer and the return sequence must fully nest
  * (end before) the untraining sequence. Therefore, the untraining
  * sequence must fully overlap the return sequence.
  *
  * Regarding alignment - the instructions which need to be untrained,
  * must all start at a cacheline boundary for Zen1/2 generations. That
- * is, instruction sequences starting at srso_safe_ret() and
+ * is, instruction sequences starting at srso_fam17_safe_ret() and
  * the respective instruction sequences at retbleed_return_thunk()
  * must start at a cacheline boundary.
  */
@@ -268,49 +268,49 @@ __EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
  * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
+ * above. On kernel entry, srso_fam17_untrain_ret() is executed which is a
  *
  * movabs $0xccccc30824648d48,%rax
  *
- * and when the return thunk executes the inner label srso_safe_ret()
+ * and when the return thunk executes the inner label srso_fam17_safe_ret()
  * later, it is a stack manipulation and a RET which is mispredicted and
  * thus a "safe" one to use.
  */
 	.align 64
-	.skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (srso_fam17_safe_ret - srso_fam17_untrain_ret), 0xcc
+SYM_START(srso_fam17_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	.byte 0x48, 0xb8
 
 /*
  * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * (UD2 in srso_fam17_return_thunk() below). This RET will then mispredict
  * and execution will continue at the return site read from the top of
  * the stack.
  */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(srso_fam17_safe_ret, SYM_L_GLOBAL)
 	lea 8(%_ASM_SP), %_ASM_SP
 	ret
 	int3
 	int3
 	/* end of movabs */
 	lfence
-	call srso_safe_ret
+	call srso_fam17_safe_ret
 	ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-__EXPORT_THUNK(srso_untrain_ret)
+SYM_CODE_END(srso_fam17_safe_ret)
+SYM_FUNC_END(srso_fam17_untrain_ret)
+__EXPORT_THUNK(srso_fam17_untrain_ret)
 
-SYM_CODE_START(srso_return_thunk)
+SYM_CODE_START(srso_fam17_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	call srso_safe_ret
+	call srso_fam17_safe_ret
 	ud2
-SYM_CODE_END(srso_return_thunk)
+SYM_CODE_END(srso_fam17_return_thunk)
 
 SYM_FUNC_START(entry_untrain_ret)
 	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+		      "jmp srso_fam17_untrain_ret", X86_FEATURE_SRSO, \
 		      "jmp srso_fam19_untrain_ret", X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
--
2.30.2
On Mon, Aug 21, 2023 at 12:27:21PM +0100, Andrew Cooper wrote:
> The naming is inconsistent. Rename the srso_*() helpers to srso_fam17_*()
> to state the microarchitecture they apply to, and to mirror the
> srso_fam19_*() change.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> CC: x86@kernel.org
> CC: linux-kernel@vger.kernel.org
> CC: Borislav Petkov <bp@alien8.de>
> CC: Peter Zijlstra <peterz@infradead.org>
> CC: Josh Poimboeuf <jpoimboe@kernel.org>
> CC: Babu Moger <babu.moger@amd.com>
> CC: David.Kaplan@amd.com
> CC: Nikolay Borisov <nik.borisov@suse.com>
> CC: gregkh@linuxfoundation.org
> CC: Thomas Gleixner <tglx@linutronix.de>
> ---
>  arch/x86/include/asm/nospec-branch.h |  4 ++--
>  arch/x86/kernel/cpu/bugs.c           |  2 +-
>  arch/x86/kernel/vmlinux.lds.S        |  2 +-
>  arch/x86/lib/retpoline.S             | 32 ++++++++++++++--------------
>  4 files changed, 20 insertions(+), 20 deletions(-)

Re your objtool woes:

> -SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
> +	.skip 64 - (srso_fam17_safe_ret - srso_fam17_untrain_ret), 0xcc
> +SYM_START(srso_fam17_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
>  	ANNOTATE_NOENDBR
>  	.byte 0x48, 0xb8
>
>  /*
>   * This forces the function return instruction to speculate into a trap
> - * (UD2 in srso_return_thunk() below). This RET will then mispredict
> + * (UD2 in srso_fam17_return_thunk() below). This RET will then mispredict
>   * and execution will continue at the return site read from the top of
>   * the stack.
>   */
> -SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
> +SYM_INNER_LABEL(srso_fam17_safe_ret, SYM_L_GLOBAL)

Note that there is a mention of 'srso_safe_ret' in
tools/objtool/arch/x86/decode.c:arch_is_embedded_insn() which you
'forgot' to update.

>  	lea 8(%_ASM_SP), %_ASM_SP
>  	ret
>  	int3
>  	int3
>  	/* end of movabs */
>  	lfence
> -	call srso_safe_ret
> +	call srso_fam17_safe_ret
>  	ud2
> -SYM_CODE_END(srso_safe_ret)
> -SYM_FUNC_END(srso_untrain_ret)
> -__EXPORT_THUNK(srso_untrain_ret)
> +SYM_CODE_END(srso_fam17_safe_ret)
> +SYM_FUNC_END(srso_fam17_untrain_ret)
> +__EXPORT_THUNK(srso_fam17_untrain_ret)
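For completeness, the follow-up that note asks for is a one-symbol rename in
objtool, which treats symbols embedded inside another symbol's body (such as
srso_safe_ret inside srso_untrain_ret) specially during decode. A minimal
sketch of such a hunk, assuming arch_is_embedded_insn() still matches the
thunk symbols by name; the exact context in the tree may differ:

diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ ... @@
 bool arch_is_embedded_insn(struct symbol *sym)
 {
 	return !strcmp(sym->name, "retbleed_return_thunk") ||
-	       !strcmp(sym->name, "srso_safe_ret");
+	       !strcmp(sym->name, "srso_fam17_safe_ret");
 }

Without this, objtool would no longer recognise the renamed inner label as an
embedded symbol and its validation of the return thunk would regress.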