Add hardware support for shadow call stack on riscv. This patch enables
the scs_* macros to use the zicfiss shadow stack pointer (CSR_SSP)
instead of relying on `gp`.
Since a zicfiss-based shadow stack needs the correct encoding set in the
PTE, the init shadow stack can't be established too early — it has to be
set up after `setup_vm` is called. Thus `scs_load_init_stack` is noped
out if CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK is selected.
Add `arch_scs_store`, which can be used in the generic scs magic store routine.
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
---
arch/riscv/include/asm/asm.h | 2 +-
arch/riscv/include/asm/scs.h | 48 +++++++++++++++++++++++++++++++++++---------
arch/riscv/kernel/entry.S | 14 ++++++-------
arch/riscv/kernel/head.S | 4 ++--
4 files changed, 49 insertions(+), 19 deletions(-)
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index a8a2af6dfe9d..256aff523dd4 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -110,7 +110,7 @@
REG_L \dst, 0(\dst)
.endm
-#ifdef CONFIG_SHADOW_CALL_STACK
+#if defined(CONFIG_SHADOW_CALL_STACK) && !defined(CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK)
/* gp is used as the shadow call stack pointer instead */
.macro load_global_pointer
.endm
diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h
index 0e45db78b24b..e70e6ef14bc5 100644
--- a/arch/riscv/include/asm/scs.h
+++ b/arch/riscv/include/asm/scs.h
@@ -9,46 +9,76 @@
/* Load init_shadow_call_stack to gp. */
.macro scs_load_init_stack
+#ifndef CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK
la gp, init_shadow_call_stack
XIP_FIXUP_OFFSET gp
+#endif
.endm
/* Load the per-CPU IRQ shadow call stack to gp. */
-.macro scs_load_irq_stack tmp
+.macro scs_load_irq_stack tmp tmp1
+#ifdef CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK
+ load_per_cpu \tmp1, irq_shadow_call_stack_ptr, \tmp
+ li \tmp, 4096
+ add \tmp, \tmp, \tmp1
+ csrw CSR_SSP, \tmp
+#else
load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+#endif
.endm
/* Load task_scs_sp(current) to gp. */
-.macro scs_load_current
+.macro scs_load_current tmp
+#ifdef CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK
+ REG_L \tmp, TASK_TI_SCS_SP(tp)
+ csrw CSR_SSP, \tmp
+#else
REG_L gp, TASK_TI_SCS_SP(tp)
+#endif
.endm
/* Load task_scs_sp(current) to gp, but only if tp has changed. */
-.macro scs_load_current_if_task_changed prev
+.macro scs_load_current_if_task_changed prev tmp
beq \prev, tp, _skip_scs
- scs_load_current
+ scs_load_current \tmp
_skip_scs:
.endm
/* Save gp to task_scs_sp(current). */
-.macro scs_save_current
+.macro scs_save_current tmp
+#ifdef CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK
+ csrr \tmp, CSR_SSP
+ REG_S \tmp, TASK_TI_SCS_SP(tp)
+#else
REG_S gp, TASK_TI_SCS_SP(tp)
+#endif
.endm
#else /* CONFIG_SHADOW_CALL_STACK */
.macro scs_load_init_stack
.endm
-.macro scs_load_irq_stack tmp
+.macro scs_load_irq_stack tmp tmp1
.endm
-.macro scs_load_current
+.macro scs_load_current tmp
.endm
-.macro scs_load_current_if_task_changed prev
+.macro scs_load_current_if_task_changed prev tmp
.endm
-.macro scs_save_current
+.macro scs_save_current tmp
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_ARCH_HAS_KERNEL_SHADOW_STACK
+#define arch_scs_store(ss_addr, magic_val) do { \
+ asm volatile ("ssamoswap.d %0, %2, %1" \
+ : "=r" (magic_val), "+A" (*ss_addr) \
+ : "r" (magic_val) \
+ : "memory"); \
+ } while (0)
+#else
+#define arch_scs_store(ss_addr, magic_val) do {} while (0)
+#endif
+
#endif /* _ASM_SCS_H */
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 3f0890b9c0b9..800a5ab763af 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -199,7 +199,7 @@ SYM_CODE_START(handle_exception)
load_global_pointer
/* Load the kernel shadow call stack pointer if coming from userspace */
- scs_load_current_if_task_changed s5
+ scs_load_current_if_task_changed s5 t0
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
move a0, sp
@@ -260,7 +260,7 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
REG_S s0, TASK_TI_KERNEL_SP(tp)
/* Save the kernel shadow call stack pointer */
- scs_save_current
+ scs_save_current t0
/*
* Save TP into the scratch register , so we can find the kernel data
@@ -382,8 +382,8 @@ SYM_FUNC_START(call_on_irq_stack)
addi s0, sp, STACKFRAME_SIZE_ON_STACK
/* Switch to the per-CPU shadow call stack */
- scs_save_current
- scs_load_irq_stack t0
+ scs_save_current t0
+ scs_load_irq_stack t0 t1
/* Switch to the per-CPU IRQ stack and call the handler */
load_per_cpu t0, irq_stack_ptr, t1
@@ -393,7 +393,7 @@ SYM_FUNC_START(call_on_irq_stack)
jalr a1
/* Switch back to the thread shadow call stack */
- scs_load_current
+ scs_load_current t0
/* Switch back to the thread stack and restore ra and s0 */
addi sp, s0, -STACKFRAME_SIZE_ON_STACK
@@ -440,7 +440,7 @@ SYM_FUNC_START(__switch_to)
REG_S s0, TASK_THREAD_SUM_RA(a3)
/* Save the kernel shadow call stack pointer */
- scs_save_current
+ scs_save_current t0
/* Restore context from next->thread */
REG_L s0, TASK_THREAD_SUM_RA(a4)
li s1, SR_SUM
@@ -463,7 +463,7 @@ SYM_FUNC_START(__switch_to)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
/* Switch to the next shadow call stack */
- scs_load_current
+ scs_load_current t0
ret
SYM_FUNC_END(__switch_to)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 59af044bf85c..366e15a9280a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -184,7 +184,7 @@ secondary_start_sbi:
REG_S a0, (a1)
1:
#endif
- scs_load_current
+ scs_load_current t0
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_KERNEL_CFI)
li a7, SBI_EXT_FWFT
@@ -367,7 +367,7 @@ SYM_CODE_START(_start_kernel)
REG_S a0, (a1)
1:
#endif
- scs_load_current
+ scs_load_current t0
#if defined(CONFIG_RISCV_SBI) && defined(CONFIG_RISCV_KERNEL_CFI)
li a7, SBI_EXT_FWFT
--
2.43.0