zicfilp protects forward control flow (when enabled) by requiring that
every indirect call and jump land on a landing pad instruction, `lpad`.
If the target of an indirect call or jump is not an `lpad` instruction,
the cpu/hart must raise a software check exception with tval = 2.

This patch implements the mechanism using TCG. A branch instruction
always ends a TB, so while translating an indirect branch the
FCFI_LP_EXPECTED TB flag can be set. Translation of the target TB then
checks whether FCFI_LP_EXPECTED is set and records it as
fcfi_lp_expected in DisasContext. If an `lpad` is translated as the
first instruction of the block, fcfi_lp_expected is cleared; otherwise
the block faults.
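For reference, a minimal stand-alone sketch (plain C, not QEMU code) of
the state machine described above; the instruction names and the toy
execute() loop are purely illustrative:

/*
 * Minimal model of the zicfilp forward-CFI state machine (illustrative
 * only): an indirect branch arms the "landing pad expected" state; the
 * next instruction must be an lpad, otherwise a software check
 * exception with tval = 2 is raised.
 */
#include <stdio.h>

typedef enum { NO_LP_EXPECTED = 0, LP_EXPECTED = 1 } cfi_elp;

#define SW_CHECK_FCFI_TVAL 2

enum insn { INSN_LPAD, INSN_ADD, INSN_JALR };

static cfi_elp elp = NO_LP_EXPECTED;

/* Returns 0 on success, the sw check tval on a CFI violation. */
static int execute(enum insn i)
{
    if (elp == LP_EXPECTED && i != INSN_LPAD) {
        elp = NO_LP_EXPECTED;
        return SW_CHECK_FCFI_TVAL;   /* missing landing pad */
    }
    elp = NO_LP_EXPECTED;            /* lpad (or no expectation) clears it */
    if (i == INSN_JALR) {
        elp = LP_EXPECTED;           /* indirect branch arms the check */
    }
    return 0;
}

int main(void)
{
    enum insn prog[] = { INSN_JALR, INSN_LPAD, INSN_JALR, INSN_ADD };

    for (unsigned k = 0; k < sizeof(prog) / sizeof(prog[0]); k++) {
        int tval = execute(prog[k]);
        printf("insn %u: %s\n", k, tval ? "sw check exception (tval=2)" : "ok");
    }
    return 0;
}

In the TCG implementation, the LP_EXPECTED state is carried between TBs
through env->elp and the FCFI_LP_EXPECTED TB flag rather than a global
variable.
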
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
Co-developed-by: Jim Shu <jim.shu@sifive.com>
Co-developed-by: Andy Chiu <andy.chiu@sifive.com>
---
target/riscv/cpu.h | 3 +++
target/riscv/cpu_bits.h | 7 ++++++
target/riscv/cpu_helper.c | 13 +++++++++++
target/riscv/helper.h | 3 +++
target/riscv/op_helper.c | 7 ++++++
target/riscv/translate.c | 45 +++++++++++++++++++++++++++++++++++++++
6 files changed, 78 insertions(+)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 12334f9540..b77481428f 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -606,6 +606,9 @@ FIELD(TB_FLAGS, ITRIGGER, 22, 1)
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
FIELD(TB_FLAGS, AXL, 26, 2)
+/* zicfilp needs a TB flag to track indirect branches */
+FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
+FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 127f2179dc..1709564b32 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -590,6 +590,10 @@ typedef enum {
LP_EXPECTED = 1,
} cfi_elp;
+typedef enum {
+ MISSING_LPAD = 0,
+} cfi_violation_cause;
+
/* hstatus CSR bits */
#define HSTATUS_VSBE 0x00000020
#define HSTATUS_GVA 0x00000040
@@ -691,6 +695,9 @@ typedef enum RISCVException {
RISCV_EXCP_SEMIHOST = 0x3f,
} RISCVException;
+/* zicfilp: an lp violation raises a sw check exception with tval = 2 */
+#define RISCV_EXCP_SW_CHECK_FCFI_TVAL 2
+
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 364f3ee212..c7af430f38 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -134,6 +134,19 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
}
+ if (cpu_get_fcfien(env)) {
+ /*
+ * For Forward CFI, only the expectation of an lpad at
+ * the start of the block is tracked (which can only happen
+ * when FCFI is enabled for the current processor mode). A jump
+ * or call at the end of the previous TB will have updated
+ * env->elp to indicate the expectation.
+ */
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_LP_EXPECTED,
+ env->elp != NO_LP_EXPECTED);
+ flags = FIELD_DP32(flags, TB_FLAGS, FCFI_ENABLED, 1);
+ }
+
#ifdef CONFIG_USER_ONLY
fs = EXT_STATUS_DIRTY;
vs = EXT_STATUS_DIRTY;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 451261ce5a..fc4c41db5e 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -121,6 +121,9 @@ DEF_HELPER_2(cbo_clean_flush, void, env, tl)
DEF_HELPER_2(cbo_inval, void, env, tl)
DEF_HELPER_2(cbo_zero, void, env, tl)
+/* helper to raise a sw check exception */
+DEF_HELPER_4(raise_sw_check_excep, void, env, tl, tl, tl)
+
/* Special functions */
DEF_HELPER_2(csrr, tl, env, int)
DEF_HELPER_3(csrw, void, env, int, tl)
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 488116cc2e..3b47fb34ea 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -259,6 +259,13 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address)
/* We don't emulate the cache-hierarchy, so we're done. */
}
+void helper_raise_sw_check_excep(CPURISCVState *env, target_ulong swcheck_code,
+ target_ulong arg1, target_ulong arg2)
+{
+ env->sw_check_code = swcheck_code;
+ riscv_raise_exception(env, RISCV_EXCP_SW_CHECK, GETPC());
+}
+
#ifndef CONFIG_USER_ONLY
target_ulong helper_sret(CPURISCVState *env)
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index acba90f170..fbca3b8a06 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -44,6 +44,7 @@ static TCGv load_val;
/* globals for PM CSRs */
static TCGv pm_mask;
static TCGv pm_base;
+static TCGOp *cfi_lp_check;
/*
* If an operation is being performed on less than TARGET_LONG_BITS,
@@ -116,6 +117,9 @@ typedef struct DisasContext {
bool frm_valid;
bool insn_start_updated;
const GPtrArray *decoders;
+ /* zicfilp: whether fcfi is enabled and whether an lpad is expected */
+ bool fcfi_enabled;
+ bool fcfi_lp_expected;
} DisasContext;
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -1238,6 +1242,8 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
ctx->ztso = cpu->cfg.ext_ztso;
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
+ ctx->fcfi_lp_expected = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_LP_EXPECTED);
+ ctx->fcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, FCFI_ENABLED);
ctx->zero = tcg_constant_tl(0);
ctx->virt_inst_excp = false;
ctx->decoders = cpu->decoders;
@@ -1245,6 +1251,37 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
+ DisasContext *ctx = container_of(db, DisasContext, base);
+
+ if (ctx->fcfi_lp_expected) {
+ /*
+ * Since we can't look ahead to confirm that the first
+ * instruction is a legal landing pad instruction, emit a
+ * compare-and-branch sequence that will be fixed up in
+ * riscv_tr_tb_stop() to either statically hit or skip a
+ * software check exception depending on whether the
+ * flag was lowered by translation of an lpad as
+ * the first instruction in the block.
+ */
+ TCGv_i32 immediate;
+ TCGLabel *l;
+ l = gen_new_label();
+ immediate = tcg_temp_new_i32();
+ tcg_gen_movi_i32(immediate, 0);
+ cfi_lp_check = tcg_last_op();
+ tcg_gen_brcondi_i32(TCG_COND_EQ, immediate, 0, l);
+ gen_helper_raise_sw_check_excep(tcg_env,
+ tcg_constant_tl(RISCV_EXCP_SW_CHECK_FCFI_TVAL),
+ tcg_constant_tl(MISSING_LPAD), tcg_constant_tl(0));
+ gen_set_label(l);
+ /*
+ * Despite emitting the software check exception helper, the rest of
+ * the TB needs to be generated. The TCG optimizer will
+ * clean things up depending on which path ends up being
+ * active.
+ */
+ ctx->base.is_jmp = DISAS_NEXT;
+ }
}
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
@@ -1303,6 +1340,14 @@ static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
default:
g_assert_not_reached();
}
+
+ if (ctx->fcfi_lp_expected) {
+ /*
+ * If the "lp expected" flag is still up, the block needs to take a
+ * software check exception.
+ */
+ tcg_set_insn_param(cfi_lp_check, 1, tcgv_i32_arg(tcg_constant_i32(1)));
+ }
}
static const TranslatorOps riscv_tr_ops = {
--
2.44.0