On Tue, Aug 27, 2024 at 1:31 AM Deepak Gupta <debug@rivosinc.com> wrote:
>
> zicfiss protects shadow stacks using a new page table encoding: PTE.W=0,
> PTE.R=0 and PTE.X=0. This encoding is reserved if zicfiss is not
Shouldn't this be R=0, W=1, and X=0?
Alistair
> implemented or if shadow stacks are not enabled.
> Loads from shadow stack memory are allowed, while regular stores to
> shadow stack memory lead to access faults. Shadow stack accesses to
> read-only memory lead to store page faults.
>
> To implement the special nature of shadow stack memory, where only
> selected stores (shadow stack stores from sspush) are allowed while
> regular stores are disallowed, a new MMU TLB index is created for the
> shadow stack.
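
For anyone skimming the thread, here is the permission model I think this
patch implements, as a self-contained sketch (my own naming and
simplifications, not code from the patch):

    /* Sketch: effective RWX under the Zicfiss PTE encoding. */
    static target_ulong resolve_ss_rwx(target_ulong rwx, bool is_ss_idx,
                                       bool bcfi_on, bool first_stage)
    {
        if (rwx == (PTE_W | PTE_X)) {
            return 0;                   /* always reserved -> fault */
        }
        if (rwx == PTE_W) {
            if (!(bcfi_on && first_stage)) {
                return 0;               /* reserved without bcfi -> fault */
            }
            /* shadow stack page: writable only via the SS MMU index */
            return is_ss_idx ? (PTE_R | PTE_W) : PTE_R;
        }
        return rwx;                     /* other encodings unchanged */
    }
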
>
> Signed-off-by: Deepak Gupta <debug@rivosinc.com>
> Suggested-by: Richard Henderson <richard.henderson@linaro.org>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> target/riscv/cpu_helper.c | 37 +++++++++++++++++++++++++++++++------
> target/riscv/internals.h | 3 +++
> 2 files changed, 34 insertions(+), 6 deletions(-)
>
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index ca6d8f1f39..b10c3a35c4 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -892,6 +892,8 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
> hwaddr ppn;
> int napot_bits = 0;
> target_ulong napot_mask;
> + bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
> + bool sstack_page = false;
>
> /*
> * Check if we should use the background registers for the two
> @@ -1100,21 +1102,36 @@ restart:
> return TRANSLATE_FAIL;
> }
>
> + target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
> /* Check for reserved combinations of RWX flags. */
> - switch (pte & (PTE_R | PTE_W | PTE_X)) {
> - case PTE_W:
> + switch (rwx) {
> case PTE_W | PTE_X:
> return TRANSLATE_FAIL;
> + case PTE_W:
> + /* if bcfi is enabled, PTE_W is not reserved and marks a shadow stack page */
> + if (cpu_get_bcfien(env) && first_stage) {
> + sstack_page = true;
> + /* if ss index, read and write are allowed; else only read is allowed */
> + rwx = is_sstack_idx ? PTE_R | PTE_W : PTE_R;
> + break;
> + }
> + return TRANSLATE_FAIL;
> + case PTE_R:
> + /* shadow stack writes to readonly memory are page faults */
> + if (is_sstack_idx && access_type == MMU_DATA_STORE) {
> + return TRANSLATE_FAIL;
> + }
> + break;
> }
>
> int prot = 0;
> - if (pte & PTE_R) {
> + if (rwx & PTE_R) {
> prot |= PAGE_READ;
> }
> - if (pte & PTE_W) {
> + if (rwx & PTE_W) {
> prot |= PAGE_WRITE;
> }
> - if (pte & PTE_X) {
> + if (rwx & PTE_X) {
> bool mxr = false;
>
> /*
> @@ -1159,7 +1176,7 @@ restart:
>
> if (!((prot >> access_type) & 1)) {
> /* Access check failed */
> - return TRANSLATE_FAIL;
> + return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
> }
>
> target_ulong updated_pte = pte;
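
To spell out how the remapped rwx interacts with the prot check (my own
walkthrough, not text from the patch):

    /*
     * W-only PTE, regular store:  rwx -> PTE_R, prot == PAGE_READ,
     *                             check fails, sstack_page is true,
     *                             so TRANSLATE_PMP_FAIL is returned
     * W-only PTE, sspush store:   rwx -> PTE_R | PTE_W, check passes
     * W-only PTE, any load:       rwx keeps PTE_R, check passes
     * R-only PTE, SS-idx store:   explicit TRANSLATE_FAIL above
     */

If I read raise_mmu_exception() right, returning TRANSLATE_PMP_FAIL here
is what turns a regular store to a shadow stack page into an access fault
rather than a page fault, while the read-only case stays a store page
fault. Both match the behaviour described in the commit message.
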
> @@ -1346,9 +1363,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
> break;
> case MMU_DATA_LOAD:
> cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
> + /* shadow stack misaligned accesses are access faults */
> + if (mmu_idx & MMU_IDX_SS_WRITE) {
> + cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
> + }
> break;
> case MMU_DATA_STORE:
> cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
> + /* shadow stack misaligned accesses are access faults */
> + if (mmu_idx & MMU_IDX_SS_WRITE) {
> + cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
> + }
> break;
> default:
> g_assert_not_reached();
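
The promotion from address-misaligned to access fault looks right to me:
only the shadow stack instructions run with MMU_IDX_SS_WRITE set, and the
comments above say misaligned shadow stack accesses must report access
faults rather than misaligned exceptions.
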
> diff --git a/target/riscv/internals.h b/target/riscv/internals.h
> index 0ac17bc5ad..ddbdee885b 100644
> --- a/target/riscv/internals.h
> +++ b/target/riscv/internals.h
> @@ -30,12 +30,15 @@
> * - U+2STAGE 0b100
> * - S+2STAGE 0b101
> * - S+SUM+2STAGE 0b110
> + * - Shadow stack+U 0b1000
> + * - Shadow stack+S 0b1001
> */
> #define MMUIdx_U 0
> #define MMUIdx_S 1
> #define MMUIdx_S_SUM 2
> #define MMUIdx_M 3
> #define MMU_2STAGE_BIT (1 << 2)
> +#define MMU_IDX_SS_WRITE (1 << 3)
>
> static inline int mmuidx_priv(int mmu_idx)
> {
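
One more sketch for context: given the table above, I assume the shadow
stack instructions build their MMU index along these lines (my guess at
the call-site pattern; mmuidx_ss() is a made-up name):

    /* Compose a shadow-stack index from the current privilege's index. */
    static inline int mmuidx_ss(int mmu_idx)
    {
        return mmu_idx | MMU_IDX_SS_WRITE;  /* e.g. U 0b000 -> 0b1000 */
    }

get_physical_address() then recovers the flag with
(mmu_idx & MMU_IDX_SS_WRITE), as in the first hunk.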
> --
> 2.44.0