Atomic memory operations perform both reads and writes as part
of their implementation, but the RISC-V specification requires any
fault they raise to be reported as a store/AMO fault, even when the
failing access is the read portion.
Use TARGET_INSN_START_EXTRA_WORDS to mark amo insns in the
opcode stream, and force the access type to write at the
point of raising the exception.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/riscv/cpu.h | 15 ++++++
target/riscv/cpu.c | 3 ++
target/riscv/cpu_helper.c | 62 +++++++++++++++++--------
target/riscv/translate.c | 9 ++++
target/riscv/insn_trans/trans_rva.c.inc | 11 ++++-
5 files changed, 79 insertions(+), 21 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c069fe85fa..3de4da3fa1 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -290,6 +290,13 @@ struct CPUArchState {
/* True if in debugger mode. */
bool debugger;
+ /*
+ * True if unwinding through an amo insn. Used to transform a
+ * read fault into a store_amo fault; only valid immediately
+ * after cpu_restore_state().
+ */
+ bool unwind_amo;
+
/*
* CSRs for PointerMasking extension
*/
@@ -517,6 +524,14 @@ FIELD(TB_FLAGS, XL, 20, 2)
FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
+#ifndef CONFIG_USER_ONLY
+/*
+ * RISC-V-specific extra insn start words:
+ * 1: True if the instruction is AMO, false otherwise.
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 1
+#endif
+
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
#else
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index ddda4906ff..3818d5ba80 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -396,6 +396,9 @@ void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
} else {
env->pc = data[0];
}
+#ifndef CONFIG_USER_ONLY
+ env->unwind_amo = data[1];
+#endif
}
static void riscv_cpu_reset(DeviceState *dev)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 126251d5da..b5bbe6fc39 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1139,26 +1139,11 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- if (access_type == MMU_DATA_STORE) {
- cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
- } else if (access_type == MMU_DATA_LOAD) {
- cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
- } else {
- cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
+ cpu_restore_state(cs, retaddr, true);
+ if (env->unwind_amo) {
+ access_type = MMU_DATA_STORE;
}
- env->badaddr = addr;
- env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
- riscv_cpu_two_stage_lookup(mmu_idx);
- cpu_loop_exit_restore(cs, retaddr);
-}
-
-void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
- MMUAccessType access_type, int mmu_idx,
- uintptr_t retaddr)
-{
- RISCVCPU *cpu = RISCV_CPU(cs);
- CPURISCVState *env = &cpu->env;
switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
@@ -1172,10 +1157,43 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
default:
g_assert_not_reached();
}
+
env->badaddr = addr;
env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
riscv_cpu_two_stage_lookup(mmu_idx);
- cpu_loop_exit_restore(cs, retaddr);
+ cpu_loop_exit(cs);
+}
+
+void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type, int mmu_idx,
+ uintptr_t retaddr)
+{
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+
+ cpu_restore_state(cs, retaddr, true);
+ if (env->unwind_amo) {
+ access_type = MMU_DATA_STORE;
+ }
+
+ switch (access_type) {
+ case MMU_INST_FETCH:
+ cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
+ break;
+ case MMU_DATA_LOAD:
+ cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ break;
+ case MMU_DATA_STORE:
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->badaddr = addr;
+ env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
+ riscv_cpu_two_stage_lookup(mmu_idx);
+ cpu_loop_exit(cs);
}
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
@@ -1307,11 +1325,15 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
+ cpu_restore_state(cs, retaddr, true);
+ if (env->unwind_amo) {
+ access_type = MMU_DATA_STORE;
+ }
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error,
riscv_cpu_virt_enabled(env) ||
riscv_cpu_two_stage_lookup(mmu_idx));
- cpu_loop_exit_restore(cs, retaddr);
+ cpu_loop_exit(cs);
}
return true;
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index fac998a6b5..ae4b0d1524 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -107,6 +107,10 @@ typedef struct DisasContext {
/* PointerMasking extension */
bool pm_mask_enabled;
bool pm_base_enabled;
+#ifndef CONFIG_USER_ONLY
+ /* TCG op of the current insn_start. */
+ TCGOp *insn_start;
+#endif
} DisasContext;
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
@@ -1105,7 +1109,12 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
+#ifdef CONFIG_USER_ONLY
tcg_gen_insn_start(ctx->base.pc_next);
+#else
+ tcg_gen_insn_start(ctx->base.pc_next, 0);
+ ctx->insn_start = tcg_last_op();
+#endif
}
static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 45db82c9be..66faa8f1da 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -37,6 +37,13 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
return true;
}
+static void record_insn_start_amo(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_set_insn_start_param(ctx->insn_start, 1, 1);
+#endif
+}
+
static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
{
TCGv dest, src1, src2;
@@ -73,6 +80,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
*/
tcg_gen_movi_tl(load_res, -1);
+ record_insn_start_amo(ctx);
return true;
}
@@ -85,8 +93,9 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
func(dest, src1, src2, ctx->mem_idx, mop);
-
gen_set_gpr(ctx, a->rd, dest);
+
+ record_insn_start_amo(ctx);
return true;
}
--
2.25.1
On Fri, Apr 1, 2022 at 11:04 PM Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Atomic memory operations perform both reads and writes as part
> of their implementation, but always raise write faults.
>
> Use TARGET_INSN_START_EXTRA_WORDS to mark amo insns in the
> opcode stream, and force the access type to write at the
> point of raising the exception.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.h | 15 ++++++
> target/riscv/cpu.c | 3 ++
> target/riscv/cpu_helper.c | 62 +++++++++++++++++--------
> target/riscv/translate.c | 9 ++++
> target/riscv/insn_trans/trans_rva.c.inc | 11 ++++-
> 5 files changed, 79 insertions(+), 21 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index c069fe85fa..3de4da3fa1 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -290,6 +290,13 @@ struct CPUArchState {
> /* True if in debugger mode. */
> bool debugger;
>
> + /*
> + * True if unwinding through an amo insn. Used to transform a
> + * read fault into a store_amo fault; only valid immediately
> + * after cpu_restore_state().
> + */
> + bool unwind_amo;
> +
> /*
> * CSRs for PointerMasking extension
> */
> @@ -517,6 +524,14 @@ FIELD(TB_FLAGS, XL, 20, 2)
> FIELD(TB_FLAGS, PM_MASK_ENABLED, 22, 1)
> FIELD(TB_FLAGS, PM_BASE_ENABLED, 23, 1)
>
> +#ifndef CONFIG_USER_ONLY
> +/*
> + * RISC-V-specific extra insn start words:
> + * 1: True if the instruction is AMO, false otherwise.
> + */
> +#define TARGET_INSN_START_EXTRA_WORDS 1
> +#endif
> +
> #ifdef TARGET_RISCV32
> #define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
> #else
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index ddda4906ff..3818d5ba80 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -396,6 +396,9 @@ void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
> } else {
> env->pc = data[0];
> }
> +#ifndef CONFIG_USER_ONLY
> + env->unwind_amo = data[1];
> +#endif
> }
>
> static void riscv_cpu_reset(DeviceState *dev)
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 126251d5da..b5bbe6fc39 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -1139,26 +1139,11 @@ void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
> RISCVCPU *cpu = RISCV_CPU(cs);
> CPURISCVState *env = &cpu->env;
>
> - if (access_type == MMU_DATA_STORE) {
> - cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
> - } else if (access_type == MMU_DATA_LOAD) {
> - cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
> - } else {
> - cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
> + cpu_restore_state(cs, retaddr, true);
> + if (env->unwind_amo) {
> + access_type = MMU_DATA_STORE;
> }
>
> - env->badaddr = addr;
> - env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
> - riscv_cpu_two_stage_lookup(mmu_idx);
> - cpu_loop_exit_restore(cs, retaddr);
> -}
> -
> -void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
> - MMUAccessType access_type, int mmu_idx,
> - uintptr_t retaddr)
> -{
> - RISCVCPU *cpu = RISCV_CPU(cs);
> - CPURISCVState *env = &cpu->env;
> switch (access_type) {
> case MMU_INST_FETCH:
> cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
> @@ -1172,10 +1157,43 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
> default:
> g_assert_not_reached();
> }
> +
> env->badaddr = addr;
> env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
> riscv_cpu_two_stage_lookup(mmu_idx);
> - cpu_loop_exit_restore(cs, retaddr);
> + cpu_loop_exit(cs);
> +}
> +
> +void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
> + MMUAccessType access_type, int mmu_idx,
> + uintptr_t retaddr)
> +{
> + RISCVCPU *cpu = RISCV_CPU(cs);
> + CPURISCVState *env = &cpu->env;
> +
> + cpu_restore_state(cs, retaddr, true);
> + if (env->unwind_amo) {
> + access_type = MMU_DATA_STORE;
> + }
> +
> + switch (access_type) {
> + case MMU_INST_FETCH:
> + cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
> + break;
> + case MMU_DATA_LOAD:
> + cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
> + break;
> + case MMU_DATA_STORE:
> + cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
> + break;
> + default:
> + g_assert_not_reached();
> + }
> +
> + env->badaddr = addr;
> + env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
> + riscv_cpu_two_stage_lookup(mmu_idx);
> + cpu_loop_exit(cs);
> }
>
> bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> @@ -1307,11 +1325,15 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
> } else if (probe) {
> return false;
> } else {
> + cpu_restore_state(cs, retaddr, true);
> + if (env->unwind_amo) {
> + access_type = MMU_DATA_STORE;
> + }
> raise_mmu_exception(env, address, access_type, pmp_violation,
> first_stage_error,
> riscv_cpu_virt_enabled(env) ||
> riscv_cpu_two_stage_lookup(mmu_idx));
> - cpu_loop_exit_restore(cs, retaddr);
> + cpu_loop_exit(cs);
> }
>
> return true;
> diff --git a/target/riscv/translate.c b/target/riscv/translate.c
> index fac998a6b5..ae4b0d1524 100644
> --- a/target/riscv/translate.c
> +++ b/target/riscv/translate.c
> @@ -107,6 +107,10 @@ typedef struct DisasContext {
> /* PointerMasking extension */
> bool pm_mask_enabled;
> bool pm_base_enabled;
> +#ifndef CONFIG_USER_ONLY
> + /* TCG op of the current insn_start. */
> + TCGOp *insn_start;
> +#endif
> } DisasContext;
>
> static inline bool has_ext(DisasContext *ctx, uint32_t ext)
> @@ -1105,7 +1109,12 @@ static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
> {
> DisasContext *ctx = container_of(dcbase, DisasContext, base);
>
> +#ifdef CONFIG_USER_ONLY
> tcg_gen_insn_start(ctx->base.pc_next);
> +#else
> + tcg_gen_insn_start(ctx->base.pc_next, 0);
> + ctx->insn_start = tcg_last_op();
> +#endif
> }
>
> static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
> diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
> index 45db82c9be..66faa8f1da 100644
> --- a/target/riscv/insn_trans/trans_rva.c.inc
> +++ b/target/riscv/insn_trans/trans_rva.c.inc
> @@ -37,6 +37,13 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop)
> return true;
> }
>
> +static void record_insn_start_amo(DisasContext *ctx)
> +{
> +#ifndef CONFIG_USER_ONLY
> + tcg_set_insn_start_param(ctx->insn_start, 1, 1);
> +#endif
> +}
> +
> static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
> {
> TCGv dest, src1, src2;
> @@ -73,6 +80,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop)
> */
> tcg_gen_movi_tl(load_res, -1);
>
> + record_insn_start_amo(ctx);
> return true;
> }
>
> @@ -85,8 +93,9 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
> TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
>
> func(dest, src1, src2, ctx->mem_idx, mop);
> -
> gen_set_gpr(ctx, a->rd, dest);
> +
> + record_insn_start_amo(ctx);
> return true;
> }
>
> --
> 2.25.1
>
>
© 2016 - 2026 Red Hat, Inc.