The following changes since commit ad10b4badc1dd5b28305f9b9f1168cf0aa3ae946:

  Merge tag 'pull-error-2024-05-27' of https://repo.or.cz/qemu/armbru into staging (2024-05-27 06:40:42 -0700)

are available in the Git repository at:

  https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20240528

for you to fetch changes up to 1806da76cb81088ea026ca3441551782b850e393:

  target/riscv: raise an exception when CSRRS/CSRRC writes a read-only CSR (2024-05-28 12:20:27 +1000)

----------------------------------------------------------------
RISC-V PR for 9.1

* APLICs add child earlier than realize
* Fix exposure of Zkr
* Raise exceptions on wrs.nto
* Implement SBI debug console (DBCN) calls for KVM
* Support 64-bit addresses for initrd
* Change RISCV_EXCP_SEMIHOST exception number to 63
* Tolerate KVM disable ext errors
* Set tval in breakpoints
* Add support for Zve32x extension
* Add support for Zve64x extension
* Relax vector register check in RISCV gdbstub
* Fix the element agnostic Vector function problem
* Fix Zvkb extension config
* Implement dynamic establishment of custom decoder
* Add th.sxstatus CSR emulation
* Fix Zvfhmin checking for vfwcvt.f.f.v and vfncvt.f.f.w instructions
* Check single width operator for vector fp widen instructions
* Check single width operator for vfncvt.rod.f.f.w
* Remove redudant SEW checking for vector fp narrow/widen instructions
* Prioritize pmp errors in raise_mmu_exception()
* Do not set mtval2 for non guest-page faults
* Remove experimental prefix from "B" extension
* Fixup CBO extension register calculation
* Fix the hart bit setting of AIA
* Fix reg_width in ricsv_gen_dynamic_vector_feature()
* Decode all of the pmpcfg and pmpaddr CSRs
* Raise an exception when CSRRS/CSRRC writes a read-only CSR

----------------------------------------------------------------
Alexei Filippov (1):
      target/riscv: do not set mtval2 for non guest-page faults

Alistair Francis (2):
      target/riscv: rvzicbo: Fixup CBO extension register calculation
      disas/riscv: Decode all of the pmpcfg and pmpaddr CSRs

Andrew Jones (2):
      target/riscv/kvm: Fix exposure of Zkr
      target/riscv: Raise exceptions on wrs.nto

Cheng Yang (1):
      hw/riscv/boot.c: Support 64-bit address for initrd

Christoph Müllner (1):
      riscv: thead: Add th.sxstatus CSR emulation

Clément Léger (1):
      target/riscv: change RISCV_EXCP_SEMIHOST exception number to 63

Daniel Henrique Barboza (6):
      target/riscv/kvm: implement SBI debug console (DBCN) calls
      target/riscv/kvm: tolerate KVM disable ext errors
      target/riscv/debug: set tval=pc in breakpoint exceptions
      trans_privileged.c.inc: set (m|s)tval on ebreak breakpoint
      target/riscv: prioritize pmp errors in raise_mmu_exception()
      riscv, gdbstub.c: fix reg_width in ricsv_gen_dynamic_vector_feature()

Huang Tao (2):
      target/riscv: Fix the element agnostic function problem
      target/riscv: Implement dynamic establishment of custom decoder

Jason Chien (3):
      target/riscv: Add support for Zve32x extension
      target/riscv: Add support for Zve64x extension
      target/riscv: Relax vector register check in RISCV gdbstub

Max Chou (4):
      target/riscv: rvv: Fix Zvfhmin checking for vfwcvt.f.f.v and vfncvt.f.f.w instructions
      target/riscv: rvv: Check single width operator for vector fp widen instructions
      target/riscv: rvv: Check single width operator for vfncvt.rod.f.f.w
      target/riscv: rvv: Remove redudant SEW checking for vector fp narrow/widen instructions

Rob Bradford (1):
      target/riscv: Remove experimental prefix from "B" extension

Yangyu Chen (1):
      target/riscv/cpu.c: fix Zvkb extension config

Yong-Xuan Wang (1):
      target/riscv/kvm.c: Fix the hart bit setting of AIA

Yu-Ming Chang (1):
      target/riscv: raise an exception when CSRRS/CSRRC writes a read-only CSR

yang.zhang (1):
      hw/intc/riscv_aplic: APLICs should add child earlier than realize

 MAINTAINERS | 1 +
 target/riscv/cpu.h | 11 ++
 target/riscv/cpu_bits.h | 2 +-
 target/riscv/cpu_cfg.h | 2 +
 target/riscv/helper.h | 1 +
 target/riscv/sbi_ecall_interface.h | 17 +++
 target/riscv/tcg/tcg-cpu.h | 15 +++
 disas/riscv.c | 65 +++++++++-
 hw/intc/riscv_aplic.c | 8 +-
 hw/riscv/boot.c | 4 +-
 target/riscv/cpu.c | 10 +-
 target/riscv/cpu_helper.c | 37 +++---
 target/riscv/csr.c | 71 +++++++++--
 target/riscv/debug.c | 3 +
 target/riscv/gdbstub.c | 8 +-
 target/riscv/kvm/kvm-cpu.c | 157 ++++++++++++++++++++++++-
 target/riscv/op_helper.c | 17 ++-
 target/riscv/tcg/tcg-cpu.c | 50 +++++---
 target/riscv/th_csr.c | 79 +++++++++++++
 target/riscv/translate.c | 31 +++--
 target/riscv/vector_internals.c | 22 ++++
 target/riscv/insn_trans/trans_privileged.c.inc | 2 +
 target/riscv/insn_trans/trans_rvv.c.inc | 46 +++++---
 target/riscv/insn_trans/trans_rvzawrs.c.inc | 29 +++--
 target/riscv/insn_trans/trans_rvzicbo.c.inc | 16 ++-
 target/riscv/meson.build | 1 +
 26 files changed, 596 insertions(+), 109 deletions(-)
 create mode 100644 target/riscv/th_csr.c

From: "yang.zhang" <yang.zhang@hexintek.com>

Since only root APLICs can have hw IRQ lines, aplic->parent should
be initialized first.

Fixes: e8f79343cf ("hw/intc: Add RISC-V AIA APLIC device emulation")
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: yang.zhang <yang.zhang@hexintek.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240409014445.278-1-gaoshanliukou@163.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/intc/riscv_aplic.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/riscv_aplic.c
+++ b/hw/intc/riscv_aplic.c
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
     qdev_prop_set_bit(dev, "msimode", msimode);
     qdev_prop_set_bit(dev, "mmode", mmode);
 
+    if (parent) {
+        riscv_aplic_add_child(parent, dev);
+    }
+
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 
     if (!is_kvm_aia(msimode)) {
         sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
     }
 
-    if (parent) {
-        riscv_aplic_add_child(parent, dev);
-    }
-
     if (!msimode) {
         for (i = 0; i < num_harts; i++) {
             CPUState *cpu = cpu_by_arch_id(hartid_base + i);
--
2.45.1

From: Andrew Jones <ajones@ventanamicro.com>

The Zkr extension may only be exposed to KVM guests if the VMM
implements the SEED CSR. Use the same implementation as TCG.

Without this patch, running with a KVM which does not forward the
SEED CSR access to QEMU will result in an ILL exception being
injected into the guest (this results in Linux guests crashing on
boot). And, when running with a KVM which does forward the access,
QEMU will crash, since QEMU doesn't know what to do with the exit.

Fixes: 3108e2f1c69d ("target/riscv/kvm: update KVM exts to Linux 6.8")
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240422134605.534207-2-ajones@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.h | 3 +++
 target/riscv/csr.c | 18 ++++++++++++++----
 target/riscv/kvm/kvm-cpu.c | 25 +++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops);
 
 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs);
 
+target_ulong riscv_new_csr_seed(target_ulong new_value,
+                                target_ulong write_mask);
+
 uint8_t satp_mode_max_from_map(uint32_t map);
 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
 
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException write_upmbase(CPURISCVState *env, int csrno,
 #endif
 
 /* Crypto Extension */
-static RISCVException rmw_seed(CPURISCVState *env, int csrno,
-                               target_ulong *ret_value,
-                               target_ulong new_value,
-                               target_ulong write_mask)
+target_ulong riscv_new_csr_seed(target_ulong new_value,
+                                target_ulong write_mask)
 {
     uint16_t random_v;
     Error *random_e = NULL;
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_seed(CPURISCVState *env, int csrno,
         rval = random_v | SEED_OPST_ES16;
     }
 
+    return rval;
+}
+
+static RISCVException rmw_seed(CPURISCVState *env, int csrno,
+                               target_ulong *ret_value,
+                               target_ulong new_value,
+                               target_ulong write_mask)
+{
+    target_ulong rval;
+
+    rval = riscv_new_csr_seed(new_value, write_mask);
+
     if (ret_value) {
         *ret_value = rval;
     }
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
     return ret;
 }
 
+static int kvm_riscv_handle_csr(CPUState *cs, struct kvm_run *run)
+{
+    target_ulong csr_num = run->riscv_csr.csr_num;
+    target_ulong new_value = run->riscv_csr.new_value;
+    target_ulong write_mask = run->riscv_csr.write_mask;
+    int ret = 0;
+
+    switch (csr_num) {
+    case CSR_SEED:
+        run->riscv_csr.ret_value = riscv_new_csr_seed(new_value, write_mask);
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: un-handled CSR EXIT for CSR %lx\n",
+                      __func__, csr_num);
+        ret = -1;
+        break;
+    }
+
+    return ret;
+}
+
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
     int ret = 0;
@@ -XXX,XX +XXX,XX @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
     case KVM_EXIT_RISCV_SBI:
         ret = kvm_riscv_handle_sbi(cs, run);
         break;
+    case KVM_EXIT_RISCV_CSR:
+        ret = kvm_riscv_handle_csr(cs, run);
+        break;
     default:
         qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                       __func__, run->exit_reason);
--
2.45.1

From: Andrew Jones <ajones@ventanamicro.com>

Implementing wrs.nto to always just return is consistent with the
specification, as the instruction is permitted to terminate the
stall for any reason, but it's not useful for virtualization, where
we'd like the guest to trap to the hypervisor in order to allow
scheduling of the lock holding VCPU. Change to always immediately
raise exceptions when the appropriate conditions are present,
otherwise continue to just return. Note, immediately raising
exceptions is also consistent with the specification since the
time limit that should expire prior to the exception is
implementation-specific.

Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Christoph Müllner <christoph.muellner@vrull.eu>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20240424142808.62936-2-ajones@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/helper.h | 1 +
 target/riscv/op_helper.c | 11 ++++++++
 target/riscv/insn_trans/trans_rvzawrs.c.inc | 29 ++++++++++++++-------
 3 files changed, 32 insertions(+), 9 deletions(-)

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
 DEF_HELPER_1(sret, tl, env)
 DEF_HELPER_1(mret, tl, env)
 DEF_HELPER_1(wfi, void, env)
+DEF_HELPER_1(wrs_nto, void, env)
 DEF_HELPER_1(tlb_flush, void, env)
 DEF_HELPER_1(tlb_flush_all, void, env)
 /* Native Debug */
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -XXX,XX +XXX,XX @@ void helper_wfi(CPURISCVState *env)
     }
 }
 
+void helper_wrs_nto(CPURISCVState *env)
+{
+    if (env->virt_enabled && (env->priv == PRV_S || env->priv == PRV_U) &&
+        get_field(env->hstatus, HSTATUS_VTW) &&
+        !get_field(env->mstatus, MSTATUS_TW)) {
+        riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
+    } else if (env->priv != PRV_M && get_field(env->mstatus, MSTATUS_TW)) {
+        riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
+    }
+}
+
 void helper_tlb_flush(CPURISCVState *env)
 {
     CPUState *cs = env_cpu(env);
diff --git a/target/riscv/insn_trans/trans_rvzawrs.c.inc b/target/riscv/insn_trans/trans_rvzawrs.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvzawrs.c.inc
+++ b/target/riscv/insn_trans/trans_rvzawrs.c.inc
@@ -XXX,XX +XXX,XX @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-static bool trans_wrs(DisasContext *ctx)
+static bool trans_wrs_sto(DisasContext *ctx, arg_wrs_sto *a)
 {
     if (!ctx->cfg_ptr->ext_zawrs) {
         return false;
@@ -XXX,XX +XXX,XX @@ static bool trans_wrs(DisasContext *ctx)
     return true;
 }
 
-#define GEN_TRANS_WRS(insn) \
-static bool trans_ ## insn(DisasContext *ctx, arg_ ## insn *a) \
-{ \
-    (void)a; \
-    return trans_wrs(ctx); \
-}
+static bool trans_wrs_nto(DisasContext *ctx, arg_wrs_nto *a)
+{
+    if (!ctx->cfg_ptr->ext_zawrs) {
+        return false;
+    }
 
-GEN_TRANS_WRS(wrs_nto)
-GEN_TRANS_WRS(wrs_sto)
+    /*
+     * Depending on the mode of execution, mstatus.TW and hstatus.VTW, wrs.nto
+     * should raise an exception when the implementation-specific bounded time
+     * limit has expired. Our time limit is zero, so we either return
+     * immediately, as does our implementation of wrs.sto, or raise an
+     * exception, as handled by the wrs.nto helper.
+     */
+#ifndef CONFIG_USER_ONLY
+    gen_helper_wrs_nto(tcg_env);
+#endif
+
+    /* We only get here when helper_wrs_nto() doesn't raise an exception. */
+    return trans_wrs_sto(ctx, NULL);
+}
--
2.45.1

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

SBI defines a Debug Console extension "DBCN" that will, in time, replace
the legacy console putchar and getchar SBI extensions.

The appeal of the DBCN extension is that it allows multiple bytes to be
read/written in the SBI console in a single SBI call.

As far as KVM goes, the DBCN calls are forwarded by an in-kernel KVM
module to userspace. But this will only happens if the KVM module
actually supports this SBI extension and we activate it.

We'll check for DBCN support during init time, checking if get-reg-list
is advertising KVM_RISCV_SBI_EXT_DBCN. In that case, we'll enable it via
kvm_set_one_reg() during kvm_arch_init_vcpu().

Finally, change kvm_riscv_handle_sbi() to handle the incoming calls for
SBI_EXT_DBCN, reading and writing as required.

A simple KVM guest with 'earlycon=sbi', running in an emulated RISC-V
host, takes around 20 seconds to boot without using DBCN. With this
patch we're taking around 14 seconds to boot due to the speed-up in the
terminal output. There's no change in boot time if the guest isn't
using earlycon.

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Message-ID: <20240425155012.581366-1-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/sbi_ecall_interface.h | 17 +++
 target/riscv/kvm/kvm-cpu.c | 111 +++++++++++++++++++++++++
 2 files changed, 128 insertions(+)

diff --git a/target/riscv/sbi_ecall_interface.h b/target/riscv/sbi_ecall_interface.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/sbi_ecall_interface.h
+++ b/target/riscv/sbi_ecall_interface.h
@@ -XXX,XX +XXX,XX @@
 
 /* clang-format off */
 
+#define SBI_SUCCESS 0
+#define SBI_ERR_FAILED -1
+#define SBI_ERR_NOT_SUPPORTED -2
+#define SBI_ERR_INVALID_PARAM -3
+#define SBI_ERR_DENIED -4
+#define SBI_ERR_INVALID_ADDRESS -5
+#define SBI_ERR_ALREADY_AVAILABLE -6
+#define SBI_ERR_ALREADY_STARTED -7
+#define SBI_ERR_ALREADY_STOPPED -8
+#define SBI_ERR_NO_SHMEM -9
+
 /* SBI Extension IDs */
 #define SBI_EXT_0_1_SET_TIMER 0x0
 #define SBI_EXT_0_1_CONSOLE_PUTCHAR 0x1
@@ -XXX,XX +XXX,XX @@
 #define SBI_EXT_IPI 0x735049
 #define SBI_EXT_RFENCE 0x52464E43
 #define SBI_EXT_HSM 0x48534D
+#define SBI_EXT_DBCN 0x4442434E
 
 /* SBI function IDs for BASE extension */
 #define SBI_EXT_BASE_GET_SPEC_VERSION 0x0
@@ -XXX,XX +XXX,XX @@
 #define SBI_EXT_HSM_HART_STOP 0x1
 #define SBI_EXT_HSM_HART_GET_STATUS 0x2
 
+/* SBI function IDs for DBCN extension */
+#define SBI_EXT_DBCN_CONSOLE_WRITE 0x0
+#define SBI_EXT_DBCN_CONSOLE_READ 0x1
+#define SBI_EXT_DBCN_CONSOLE_WRITE_BYTE 0x2
+
 #define SBI_HSM_HART_STATUS_STARTED 0x0
 #define SBI_HSM_HART_STATUS_STOPPED 0x1
 #define SBI_HSM_HART_STATUS_START_PENDING 0x2
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_v_vlenb = {
                    KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)
 };
 
+static KVMCPUConfig kvm_sbi_dbcn = {
+    .name = "sbi_dbcn",
+    .kvm_reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+                  KVM_REG_RISCV_SBI_EXT | KVM_RISCV_SBI_EXT_DBCN
+};
+
 static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
 {
     CPURISCVState *env = &cpu->env;
@@ -XXX,XX +XXX,XX @@ static int uint64_cmp(const void *a, const void *b)
     return 0;
 }
 
+static void kvm_riscv_check_sbi_dbcn_support(RISCVCPU *cpu,
+                                             KVMScratchCPU *kvmcpu,
+                                             struct kvm_reg_list *reglist)
+{
+    struct kvm_reg_list *reg_search;
+
+    reg_search = bsearch(&kvm_sbi_dbcn.kvm_reg_id, reglist->reg, reglist->n,
+                         sizeof(uint64_t), uint64_cmp);
+
+    if (reg_search) {
+        kvm_sbi_dbcn.supported = true;
+    }
+}
+
 static void kvm_riscv_read_vlenb(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
                                  struct kvm_reg_list *reglist)
 {
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
     if (riscv_has_ext(&cpu->env, RVV)) {
         kvm_riscv_read_vlenb(cpu, kvmcpu, reglist);
     }
+
+    kvm_riscv_check_sbi_dbcn_support(cpu, kvmcpu, reglist);
 }
 
 static void riscv_init_kvm_registers(Object *cpu_obj)
@@ -XXX,XX +XXX,XX @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
     return ret;
 }
 
+static int kvm_vcpu_enable_sbi_dbcn(RISCVCPU *cpu, CPUState *cs)
+{
+    target_ulong reg = 1;
+
+    if (!kvm_sbi_dbcn.supported) {
+        return 0;
+    }
+
+    return kvm_set_one_reg(cs, kvm_sbi_dbcn.kvm_reg_id, &reg);
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     int ret = 0;
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init_vcpu(CPUState *cs)
     kvm_riscv_update_cpu_misa_ext(cpu, cs);
     kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);
 
+    ret = kvm_vcpu_enable_sbi_dbcn(cpu, cs);
+
     return ret;
 }
 
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_stop_on_emulation_error(CPUState *cs)
     return true;
 }
 
+static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run)
+{
+    g_autofree uint8_t *buf = NULL;
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    target_ulong num_bytes;
+    uint64_t addr;
+    unsigned char ch;
+    int ret;
+
+    switch (run->riscv_sbi.function_id) {
+    case SBI_EXT_DBCN_CONSOLE_READ:
+    case SBI_EXT_DBCN_CONSOLE_WRITE:
+        num_bytes = run->riscv_sbi.args[0];
+
+        if (num_bytes == 0) {
+            run->riscv_sbi.ret[0] = SBI_SUCCESS;
+            run->riscv_sbi.ret[1] = 0;
+            break;
+        }
+
+        addr = run->riscv_sbi.args[1];
+
+        /*
+         * Handle the case where a 32 bit CPU is running in a
+         * 64 bit addressing env.
+         */
+        if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) {
+            addr |= (uint64_t)run->riscv_sbi.args[2] << 32;
+        }
+
+        buf = g_malloc0(num_bytes);
+
+        if (run->riscv_sbi.function_id == SBI_EXT_DBCN_CONSOLE_READ) {
+            ret = qemu_chr_fe_read_all(serial_hd(0)->be, buf, num_bytes);
+            if (ret < 0) {
+                error_report("SBI_EXT_DBCN_CONSOLE_READ: error when "
+                             "reading chardev");
+                exit(1);
+            }
+
+            cpu_physical_memory_write(addr, buf, ret);
+        } else {
+            cpu_physical_memory_read(addr, buf, num_bytes);
+
+            ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes);
+            if (ret < 0) {
+                error_report("SBI_EXT_DBCN_CONSOLE_WRITE: error when "
+                             "writing chardev");
+                exit(1);
+            }
+        }
+
+        run->riscv_sbi.ret[0] = SBI_SUCCESS;
+        run->riscv_sbi.ret[1] = ret;
+        break;
+    case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
+        ch = run->riscv_sbi.args[0];
+        ret = qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
+
+        if (ret < 0) {
+            error_report("SBI_EXT_DBCN_CONSOLE_WRITE_BYTE: error when "
+                         "writing chardev");
+            exit(1);
+        }
+
+        run->riscv_sbi.ret[0] = SBI_SUCCESS;
+        run->riscv_sbi.ret[1] = 0;
+        break;
+    default:
+        run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
+    }
+}
+
 static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
 {
     int ret = 0;
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
         }
         ret = 0;
         break;
+    case SBI_EXT_DBCN:
+        kvm_riscv_handle_sbi_dbcn(cs, run);
+        break;
     default:
         qemu_log_mask(LOG_UNIMP,
                       "%s: un-handled SBI EXIT, specific reasons is %lu\n",
--
2.45.1

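For context, a guest reaches the DBCN handler above through the standard SBI
calling convention: the extension ID goes in a7, the function ID in a6, the
arguments in a0-a2, and the error/value pair comes back in a0/a1. The sketch
below is illustrative only and is not part of this series; the wrapper name is
made up, and it assumes an RV64 guest passing a buffer address that the SBI
implementation can access.

/*
 * Illustrative sketch (not part of this series): a guest-side wrapper
 * for SBI_EXT_DBCN_CONSOLE_WRITE. The function name is hypothetical.
 */
#include <stddef.h>

#define SBI_EXT_DBCN               0x4442434E
#define SBI_EXT_DBCN_CONSOLE_WRITE 0x0

static long sbi_dbcn_console_write(const char *buf, size_t len)
{
    register unsigned long a0 asm("a0") = len;                /* num_bytes */
    register unsigned long a1 asm("a1") = (unsigned long)buf; /* base address (low) */
    register unsigned long a2 asm("a2") = 0;                  /* base address (high), unused on RV64 */
    register unsigned long a6 asm("a6") = SBI_EXT_DBCN_CONSOLE_WRITE;
    register unsigned long a7 asm("a7") = SBI_EXT_DBCN;

    asm volatile("ecall"
                 : "+r"(a0), "+r"(a1)
                 : "r"(a2), "r"(a6), "r"(a7)
                 : "memory");

    return (long)a0; /* SBI error code; a1 holds the number of bytes written */
}
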
From: Cheng Yang <yangcheng.work@foxmail.com>

Use qemu_fdt_setprop_u64() instead of qemu_fdt_setprop_cell()
to set the address of initrd in FDT to support 64-bit address.

Signed-off-by: Cheng Yang <yangcheng.work@foxmail.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <tencent_A4482251DD0890F312758FA6B33F60815609@qq.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 hw/riscv/boot.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -XXX,XX +XXX,XX @@ static void riscv_load_initrd(MachineState *machine, uint64_t kernel_entry)
     /* Some RISC-V machines (e.g. opentitan) don't have a fdt. */
     if (fdt) {
         end = start + size;
-        qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start", start);
-        qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end", end);
+        qemu_fdt_setprop_u64(fdt, "/chosen", "linux,initrd-start", start);
+        qemu_fdt_setprop_u64(fdt, "/chosen", "linux,initrd-end", end);
     }
 }
 
--
2.45.1

From: Clément Léger <cleger@rivosinc.com>

The current semihost exception number (16) is a reserved number (range
[16-17]). The upcoming double trap specification uses that number for
the double trap exception. Since the privileged spec (Table 22) defines
ranges for custom uses change the semihosting exception number to 63
which belongs to the range [48-63] in order to avoid any future
collisions with reserved exception.

Signed-off-by: Clément Léger <cleger@rivosinc.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20240422135840.1959967-1-cleger@rivosinc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_bits.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
     RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
     RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
     RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
-    RISCV_EXCP_SEMIHOST = 0x10,
     RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
     RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT = 0x15,
     RISCV_EXCP_VIRT_INSTRUCTION_FAULT = 0x16,
     RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT = 0x17,
+    RISCV_EXCP_SEMIHOST = 0x3f,
 } RISCVException;
 
 #define RISCV_EXCP_INT_FLAG 0x80000000
--
2.45.1

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

Running a KVM guest using a 6.9-rc3 kernel, in a 6.8 host that has zkr
enabled, will fail with a kernel oops SIGILL right at the start. The
reason is that we can't expose zkr without implementing the SEED CSR.
Disabling zkr in the guest would be a workaround, but if the KVM doesn't
allow it we'll error out and never boot.

In hindsight this is too strict. If we keep proceeding, despite not
disabling the extension in the KVM vcpu, we'll not add the extension in
the riscv,isa. The guest kernel will be unaware of the extension, i.e.
it doesn't matter if the KVM vcpu has it enabled underneath or not. So
it's ok to keep booting in this case.

Change our current logic to not error out if we fail to disable an
extension in kvm_set_one_reg(), but show a warning and keep booting. It
is important to throw a warning because we must make the user aware that
the extension is still available in the vcpu, meaning that an
ill-behaved guest can ignore the riscv,isa settings and use the
extension.

The case we're handling happens with an EINVAL error code. If we fail to
disable the extension in KVM for any other reason, error out.

We'll also keep erroring out when we fail to enable an extension in KVM,
since adding the extension in riscv,isa at this point will cause a guest
malfunction because the extension isn't enabled in the vcpu.

Suggested-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240422171425.333037-2-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/kvm/kvm-cpu.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/kvm/kvm-cpu.c
+++ b/target/riscv/kvm/kvm-cpu.c
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
         reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
         ret = kvm_set_one_reg(cs, id, &reg);
         if (ret != 0) {
-            error_report("Unable to %s extension %s in KVM, error %d",
-                         reg ? "enable" : "disable",
-                         multi_ext_cfg->name, ret);
-            exit(EXIT_FAILURE);
+            if (!reg && ret == -EINVAL) {
+                warn_report("KVM cannot disable extension %s",
+                            multi_ext_cfg->name);
+            } else {
+                error_report("Unable to enable extension %s in KVM, error %d",
+                             multi_ext_cfg->name, ret);
+                exit(EXIT_FAILURE);
+            }
         }
     }
 }
--
2.45.1

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

We're not setting (s/m)tval when triggering breakpoints of type 2
(mcontrol) and 6 (mcontrol6). According to the debug spec section
5.7.12, "Match Control Type 6":

"The Privileged Spec says that breakpoint exceptions that occur on
instruction fetches, loads, or stores update the tval CSR with either
zero or the faulting virtual address. The faulting virtual address for
an mcontrol6 trigger with action = 0 is the address being accessed and
which caused that trigger to fire."

A similar text is also found in the Debug spec section 5.7.11 w.r.t.
mcontrol.

Note that what we're doing ATM is not violating the spec, but it's
simple enough to set mtval/stval and it makes life easier for any
software that relies on this info.

Given that we always use action = 0, save the faulting address for the
mcontrol and mcontrol6 trigger breakpoints into env->badaddr, which is
used as as scratch area for traps with address information. 'tval' is
then set during riscv_cpu_do_interrupt().

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Message-ID: <20240416230437.1869024-2-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_helper.c | 1 +
 target/riscv/debug.c | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
+           tval = env->badaddr;
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/debug.c
+++ b/target/riscv/debug.c
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs)
                if ((ctrl & TYPE2_EXEC) && (bp->pc == pc)) {
                    /* check U/S/M bit against current privilege level */
                    if ((ctrl >> 3) & BIT(env->priv)) {
+                       env->badaddr = pc;
                        return true;
                    }
                }
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_debug_check_breakpoint(CPUState *cs)
                    if (env->virt_enabled) {
                        /* check VU/VS bit against current privilege level */
                        if ((ctrl >> 23) & BIT(env->priv)) {
+                           env->badaddr = pc;
                            return true;
                        }
                    } else {
                        /* check U/S/M bit against current privilege level */
                        if ((ctrl >> 3) & BIT(env->priv)) {
+                           env->badaddr = pc;
                            return true;
                        }
                    }
--
2.45.1

From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

Privileged spec section 4.1.9 mentions:

"When a trap is taken into S-mode, stval is written with
exception-specific information to assist software in handling the trap.
(...)

If stval is written with a nonzero value when a breakpoint,
address-misaligned, access-fault, or page-fault exception occurs on an
instruction fetch, load, or store, then stval will contain the faulting
virtual address."

A similar text is found for mtval in section 3.1.16.

Setting mtval/stval in this scenario is optional, but some softwares read
these regs when handling ebreaks.

Write 'badaddr' in all ebreak breakpoints to write the appropriate
'tval' during riscv_do_cpu_interrrupt().

Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-ID: <20240416230437.1869024-3-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/insn_trans/trans_privileged.c.inc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_privileged.c.inc
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
     if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) {
         generate_exception(ctx, RISCV_EXCP_SEMIHOST);
     } else {
+        tcg_gen_st_tl(tcg_constant_tl(ebreak_addr), tcg_env,
+                      offsetof(CPURISCVState, badaddr));
         generate_exception(ctx, RISCV_EXCP_BREAKPOINT);
     }
     return true;
--
2.45.1

From: Jason Chien <jason.chien@sifive.com>

Add support for Zve32x extension and replace some checks for Zve32f with
Zve32x, since Zve32f depends on Zve32x.

Signed-off-by: Jason Chien <jason.chien@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-ID: <20240328022343.6871-2-jason.chien@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_cfg.h | 1 +
 target/riscv/cpu.c | 2 ++
 target/riscv/cpu_helper.c | 2 +-
 target/riscv/csr.c | 2 +-
 target/riscv/tcg/tcg-cpu.c | 16 ++++++++--------
 target/riscv/insn_trans/trans_rvv.c.inc | 4 ++--
 6 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
     bool ext_zhinx;
     bool ext_zhinxmin;
     bool ext_zve32f;
+    bool ext_zve32x;
     bool ext_zve64f;
     bool ext_zve64d;
     bool ext_zvbb;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
     ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
     ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
+    ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false),
     MULTI_EXT_CFG_BOOL("zfhmin", ext_zfhmin, false),
     MULTI_EXT_CFG_BOOL("zve32f", ext_zve32f, false),
+    MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
     *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
     *cs_base = 0;
 
-    if (cpu->cfg.ext_zve32f) {
+    if (cpu->cfg.ext_zve32x) {
         /*
          * If env->vl equals to VLMAX, we can use generic vector operation
          * expanders (GVEC) to accerlate the vector operations.
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException fs(CPURISCVState *env, int csrno)
 
 static RISCVException vs(CPURISCVState *env, int csrno)
 {
-    if (riscv_cpu_cfg(env)->ext_zve32f) {
+    if (riscv_cpu_cfg(env)->ext_zve32x) {
 #if !defined(CONFIG_USER_ONLY)
         if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
             return RISCV_EXCP_ILLEGAL_INST;
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
-        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
-        return;
+    /* The Zve32f extension depends on the Zve32x extension */
+    if (cpu->cfg.ext_zve32f) {
+        if (!riscv_has_ext(env, RVF)) {
+            error_setg(errp, "Zve32f/Zve64f extensions require F extension");
+            return;
+        }
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
     }
 
     if (cpu->cfg.ext_zvfh) {
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
     }
 
-    /*
-     * In principle Zve*x would also suffice here, were they supported
-     * in qemu
-     */
     if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
          cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
-         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
+         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32x) {
         error_setg(errp,
                    "Vector crypto extensions require V or Zve* extensions");
         return;
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -XXX,XX +XXX,XX @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
 {
     TCGv s1, dst;
 
-    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
+    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32x) {
         return false;
     }
 
@@ -XXX,XX +XXX,XX @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
 {
     TCGv dst;
 
-    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
+    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32x) {
         return false;
     }
 
--
2.45.1

From: Jason Chien <jason.chien@sifive.com>

Add support for Zve64x extension. Enabling Zve64f enables Zve64x and
enabling Zve64x enables Zve32x according to their dependency.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2107
Signed-off-by: Jason Chien <jason.chien@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Message-ID: <20240328022343.6871-3-jason.chien@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu_cfg.h | 1 +
 target/riscv/cpu.c | 2 ++
 target/riscv/tcg/tcg-cpu.c | 17 +++++++++++------
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
     bool ext_zve32x;
     bool ext_zve64f;
     bool ext_zve64d;
+    bool ext_zve64x;
     bool ext_zvbb;
     bool ext_zvbc;
     bool ext_zvkb;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(zve32x, PRIV_VERSION_1_10_0, ext_zve32x),
     ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
     ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
+    ISA_EXT_DATA_ENTRY(zve64x, PRIV_VERSION_1_10_0, ext_zve64x),
     ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
     ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
     ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zve32x", ext_zve32x, false),
     MULTI_EXT_CFG_BOOL("zve64f", ext_zve64f, false),
     MULTI_EXT_CFG_BOOL("zve64d", ext_zve64d, false),
+    MULTI_EXT_CFG_BOOL("zve64x", ext_zve64x, false),
     MULTI_EXT_CFG_BOOL("zvfbfmin", ext_zvfbfmin, false),
     MULTI_EXT_CFG_BOOL("zvfbfwma", ext_zvfbfwma, false),
     MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
 
     /* The Zve64d extension depends on the Zve64f extension */
     if (cpu->cfg.ext_zve64d) {
+        if (!riscv_has_ext(env, RVD)) {
+            error_setg(errp, "Zve64d/V extensions require D extension");
+            return;
+        }
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
     }
 
-    /* The Zve64f extension depends on the Zve32f extension */
+    /* The Zve64f extension depends on the Zve64x and Zve32f extensions */
     if (cpu->cfg.ext_zve64f) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64x), true);
         cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
     }
 
-    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
-        error_setg(errp, "Zve64d/V extensions require D extension");
-        return;
+    /* The Zve64x extension depends on the Zve32x extension */
+    if (cpu->cfg.ext_zve64x) {
+        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32x), true);
     }
 
     /* The Zve32f extension depends on the Zve32x extension */
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
         return;
     }
 
-    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
+    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64x) {
         error_setg(
             errp,
-            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
+            "Zvbc and Zvknhb extensions require V or Zve64x extensions");
         return;
     }
 
--
2.45.1

From: Jason Chien <jason.chien@sifive.com>

In current implementation, the gdbstub allows reading vector registers
only if V extension is supported. However, all vector extensions and
vector crypto extensions have the vector registers and they all depend
on Zve32x. The gdbstub should check for Zve32x instead.

Signed-off-by: Jason Chien <jason.chien@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Max Chou <max.chou@sifive.com>
Message-ID: <20240328022343.6871-4-jason.chien@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/gdbstub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs)
                                  gdb_find_static_feature("riscv-32bit-fpu.xml"),
                                  0);
     }
-    if (env->misa_ext & RVV) {
+    if (cpu->cfg.ext_zve32x) {
         gdb_register_coprocessor(cs, riscv_gdb_get_vector,
                                  riscv_gdb_set_vector,
                                  ricsv_gen_dynamic_vector_feature(cs, cs->gdb_num_regs),
--
2.45.1

From: Huang Tao <eric.huang@linux.alibaba.com>

In RVV and vcrypto instructions, the masked and tail elements are set to 1s
using vext_set_elems_1s function if the vma/vta bit is set. It is the element
agnostic policy.

However, this function can't deal the big endian situation. This patch fixes
the problem by adding handling of such case.

Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240325021654.6594-1-eric.huang@linux.alibaba.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/vector_internals.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -XXX,XX +XXX,XX @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
     if (tot - cnt == 0) {
         return ;
     }
+
+    if (HOST_BIG_ENDIAN) {
+        /*
+         * Deal the situation when the elements are insdie
+         * only one uint64 block including setting the
+         * masked-off element.
+         */
+        if (((tot - 1) ^ cnt) < 8) {
+            memset(base + H1(tot - 1), -1, tot - cnt);
+            return;
+        }
+        /*
+         * Otherwise, at least cross two uint64_t blocks.
+         * Set first unaligned block.
+         */
+        if (cnt % 8 != 0) {
+            uint32_t j = ROUND_UP(cnt, 8);
+            memset(base + H1(j - 1), -1, j - cnt);
+            cnt = j;
+        }
+        /* Set other 64bit aligend blocks */
+    }
     memset(base + cnt, -1, tot - cnt);
 }
 
--
2.45.1

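The big-endian handling above leans on QEMU's host-endian element indexing:
vector register state is stored as host-endian uint64_t chunks, so on a
big-endian host the byte at logical index i within a chunk sits at host
offset i ^ 7, which is what the H1() macro used by the patch expands to
there. The standalone program below only demonstrates that mapping; it is
not QEMU code and all names in it are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Logical byte i of this value holds the value i. */
    uint64_t chunk = 0x0706050403020100ULL;
    const uint8_t *p = (const uint8_t *)&chunk;
    int big_endian = (p[0] == 0x07);

    for (unsigned i = 0; i < 8; i++) {
        /* On a big-endian host, logical byte i lives at offset i ^ 7. */
        unsigned host_off = big_endian ? (i ^ 7) : i;
        printf("logical byte %u -> host offset %u (value 0x%02x)\n",
               i, host_off, p[host_off]);
    }
    return 0;
}
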
From: Yangyu Chen <cyy@cyyself.name>

This code has a typo that writes zvkb to zvkg, causing users can't
enable zvkb through the config. This patch gets this fixed.

Signed-off-by: Yangyu Chen <cyy@cyyself.name>
Fixes: ea61ef7097d0 ("target/riscv: Move vector crypto extensions to riscv_cpu_extensions")
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Weiwei Li <liwei1518@gmail.com>
Message-ID: <tencent_7E34EEF0F90B9A68BF38BEE09EC6D4877C0A@qq.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     /* Vector cryptography extensions */
     MULTI_EXT_CFG_BOOL("zvbb", ext_zvbb, false),
     MULTI_EXT_CFG_BOOL("zvbc", ext_zvbc, false),
-    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkg, false),
+    MULTI_EXT_CFG_BOOL("zvkb", ext_zvkb, false),
     MULTI_EXT_CFG_BOOL("zvkg", ext_zvkg, false),
     MULTI_EXT_CFG_BOOL("zvkned", ext_zvkned, false),
     MULTI_EXT_CFG_BOOL("zvknha", ext_zvknha, false),
--
2.45.1

From: Huang Tao <eric.huang@linux.alibaba.com>

In this patch, we modify the decoder to be a freely composable data
structure instead of a hardcoded one. It can be dynamically builded up
according to the extensions.
This approach has several benefits:
1. Provides support for heterogeneous cpu architectures. As we add decoder in
RISCVCPU, each cpu can have their own decoder, and the decoders can be
different due to cpu's features.
2. Improve the decoding efficiency. We run the guard_func to see if the decoder
can be added to the dynamic_decoder when building up the decoder. Therefore,
there is no need to run the guard_func when decoding each instruction. It can
improve the decoding efficiency
3. For vendor or dynamic cpus, it allows them to customize their own decoder
functions to improve decoding efficiency, especially when vendor-defined
instruction sets increase. Because of dynamic building up, it can skip the other
decoder guard functions when decoding.
4. Pre patch for allowing adding a vendor decoder before decode_insn32() with minimal
overhead for users that don't need this particular vendor decoder.

Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
Suggested-by: Christoph Muellner <christoph.muellner@vrull.eu>
Co-authored-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20240506023607.29544-1-eric.huang@linux.alibaba.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.h | 1 +
 target/riscv/tcg/tcg-cpu.h | 15 +++++++++++++++
 target/riscv/cpu.c | 1 +
 target/riscv/tcg/tcg-cpu.c | 15 +++++++++++++++
 target/riscv/translate.c | 31 +++++++++++++++----------------
 5 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ struct ArchCPU {
     uint32_t pmu_avail_ctrs;
     /* Mapping of events to counters */
     GHashTable *pmu_event_ctr_map;
+    const GPtrArray *decoders;
 };
 
 /**
diff --git a/target/riscv/tcg/tcg-cpu.h b/target/riscv/tcg/tcg-cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.h
+++ b/target/riscv/tcg/tcg-cpu.h
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp);
 void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu);
 
+struct DisasContext;
+struct RISCVCPUConfig;
+typedef struct RISCVDecoder {
+    bool (*guard_func)(const struct RISCVCPUConfig *);
+    bool (*riscv_cpu_decode_fn)(struct DisasContext *, uint32_t);
+} RISCVDecoder;
+
+typedef bool (*riscv_cpu_decode_fn)(struct DisasContext *, uint32_t);
+
+extern const size_t decoder_table_size;
+
+extern const RISCVDecoder decoder_table[];
+
+void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu);
+
 #endif
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
             error_propagate(errp, local_err);
             return;
         }
+        riscv_tcg_cpu_finalize_dynamic_decoder(cpu);
     } else if (kvm_enabled()) {
         riscv_kvm_cpu_finalize_features(cpu, &local_err);
         if (local_err != NULL) {
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/tcg/tcg-cpu.c
+++ b/target/riscv/tcg/tcg-cpu.c
@@ -XXX,XX +XXX,XX @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
     }
 }
 
+void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
+{
+    GPtrArray *dynamic_decoders;
+    dynamic_decoders = g_ptr_array_sized_new(decoder_table_size);
+    for (size_t i = 0; i < decoder_table_size; ++i) {
+        if (decoder_table[i].guard_func &&
+            decoder_table[i].guard_func(&cpu->cfg)) {
+            g_ptr_array_add(dynamic_decoders,
+                            (gpointer)decoder_table[i].riscv_cpu_decode_fn);
+        }
+    }
+
+    cpu->decoders = dynamic_decoders;
+}
+
 bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
86
- pte = cpu_ldub_mmuidx_ra(env, address, mmu_idx, GETPC());
87
- break;
88
- case MO_TESW:
89
- pte = cpu_ldsw_mmuidx_ra(env, address, mmu_idx, GETPC());
90
- break;
91
- case MO_TEUW:
92
- pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
93
- break;
94
- case MO_TESL:
95
- pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
96
- break;
97
- case MO_TEUL:
98
- pte = cpu_ldl_mmuidx_ra(env, address, mmu_idx, GETPC());
99
- break;
100
- case MO_TEQ:
101
- pte = cpu_ldq_mmuidx_ra(env, address, mmu_idx, GETPC());
102
- break;
103
- default:
104
- g_assert_not_reached();
105
- }
106
-
107
- return pte;
108
- }
109
-
110
- if (riscv_cpu_virt_enabled(env)) {
111
- riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
112
- } else {
113
- riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
114
- }
115
- return 0;
116
-}
117
-
118
-void helper_hyp_store(CPURISCVState *env, target_ulong address,
119
- target_ulong val, target_ulong attrs, target_ulong memop)
120
-{
121
- if (env->priv == PRV_M ||
122
- (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
123
- (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
124
- get_field(env->hstatus, HSTATUS_HU))) {
125
- int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
126
-
127
- switch (memop) {
128
- case MO_SB:
129
- case MO_UB:
130
- cpu_stb_mmuidx_ra(env, address, val, mmu_idx, GETPC());
131
- break;
132
- case MO_TESW:
133
- case MO_TEUW:
134
- cpu_stw_mmuidx_ra(env, address, val, mmu_idx, GETPC());
135
- break;
136
- case MO_TESL:
137
- case MO_TEUL:
138
- cpu_stl_mmuidx_ra(env, address, val, mmu_idx, GETPC());
139
- break;
140
- case MO_TEQ:
141
- cpu_stq_mmuidx_ra(env, address, val, mmu_idx, GETPC());
142
- break;
143
- default:
144
- g_assert_not_reached();
145
- }
146
-
147
- return;
148
- }
149
-
150
- if (riscv_cpu_virt_enabled(env)) {
151
- riscv_raise_exception(env, RISCV_EXCP_VIRT_INSTRUCTION_FAULT, GETPC());
152
- } else {
153
- riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
154
- }
155
-}
156
-
157
target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
158
target_ulong attrs, target_ulong memop)
159
{
108
{
109
return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
160
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
110
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
161
index XXXXXXX..XXXXXXX 100644
111
index XXXXXXX..XXXXXXX 100644
162
--- a/target/riscv/translate.c
112
--- a/target/riscv/translate.c
163
+++ b/target/riscv/translate.c
113
+++ b/target/riscv/translate.c
114
@@ -XXX,XX +XXX,XX @@
115
#include "exec/helper-info.c.inc"
116
#undef HELPER_H
117
118
+#include "tcg/tcg-cpu.h"
119
+
120
/* global register indices */
121
static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
122
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
164
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
123
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
165
to reset this known value. */
124
/* FRM is known to contain a valid value. */
166
int frm;
125
bool frm_valid;
167
bool ext_ifencei;
126
bool insn_start_updated;
168
+ bool hlsx;
127
+ const GPtrArray *decoders;
169
/* vector extension */
128
} DisasContext;
170
bool vill;
129
171
uint8_t lmul;
130
static inline bool has_ext(DisasContext *ctx, uint32_t ext)
131
@@ -XXX,XX +XXX,XX @@ static inline int insn_len(uint16_t first_word)
132
return (first_word & 3) == 3 ? 4 : 2;
133
}
134
135
+const RISCVDecoder decoder_table[] = {
136
+ { always_true_p, decode_insn32 },
137
+ { has_xthead_p, decode_xthead},
138
+ { has_XVentanaCondOps_p, decode_XVentanaCodeOps},
139
+};
140
+
141
+const size_t decoder_table_size = ARRAY_SIZE(decoder_table);
142
+
143
static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
144
{
145
- /*
146
- * A table with predicate (i.e., guard) functions and decoder functions
147
- * that are tested in-order until a decoder matches onto the opcode.
148
- */
149
- static const struct {
150
- bool (*guard_func)(const RISCVCPUConfig *);
151
- bool (*decode_func)(DisasContext *, uint32_t);
152
- } decoders[] = {
153
- { always_true_p, decode_insn32 },
154
- { has_xthead_p, decode_xthead },
155
- { has_XVentanaCondOps_p, decode_XVentanaCodeOps },
156
- };
157
-
158
ctx->virt_inst_excp = false;
159
ctx->cur_insn_len = insn_len(opcode);
160
/* Check for compressed insn */
161
@@ -XXX,XX +XXX,XX @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
162
ctx->base.pc_next + 2));
163
ctx->opcode = opcode32;
164
165
- for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
166
- if (decoders[i].guard_func(ctx->cfg_ptr) &&
167
- decoders[i].decode_func(ctx, opcode32)) {
168
+ for (guint i = 0; i < ctx->decoders->len; ++i) {
169
+ riscv_cpu_decode_fn func = g_ptr_array_index(ctx->decoders, i);
170
+ if (func(ctx, opcode32)) {
171
return;
172
}
173
}
172
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
174
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
173
ctx->frm = -1; /* unknown rounding mode */
175
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
174
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
176
ctx->zero = tcg_constant_tl(0);
175
ctx->vlen = cpu->cfg.vlen;
177
ctx->virt_inst_excp = false;
176
+ ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX);
178
+ ctx->decoders = cpu->decoders;
177
ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL);
179
}
178
ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW);
180
179
ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL);
181
static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
180
diff --git a/target/riscv/insn_trans/trans_rvh.c.inc b/target/riscv/insn_trans/trans_rvh.c.inc
181
index XXXXXXX..XXXXXXX 100644
182
--- a/target/riscv/insn_trans/trans_rvh.c.inc
183
+++ b/target/riscv/insn_trans/trans_rvh.c.inc
184
@@ -XXX,XX +XXX,XX @@
185
* this program. If not, see <http://www.gnu.org/licenses/>.
186
*/
187
188
+#ifndef CONFIG_USER_ONLY
189
+static void check_access(DisasContext *ctx) {
190
+ if (!ctx->hlsx) {
191
+ if (ctx->virt_enabled) {
192
+ generate_exception(ctx, RISCV_EXCP_VIRT_INSTRUCTION_FAULT);
193
+ } else {
194
+ generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
195
+ }
196
+ }
197
+}
198
+#endif
199
+
200
static bool trans_hlv_b(DisasContext *ctx, arg_hlv_b *a)
201
{
202
REQUIRE_EXT(ctx, RVH);
203
#ifndef CONFIG_USER_ONLY
204
TCGv t0 = tcg_temp_new();
205
TCGv t1 = tcg_temp_new();
206
- TCGv mem_idx = tcg_temp_new();
207
- TCGv memop = tcg_temp_new();
208
+
209
+ check_access(ctx);
210
211
gen_get_gpr(t0, a->rs1);
212
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
213
- tcg_gen_movi_tl(memop, MO_SB);
214
215
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
216
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
217
gen_set_gpr(a->rd, t1);
218
219
tcg_temp_free(t0);
220
tcg_temp_free(t1);
221
- tcg_temp_free(mem_idx);
222
- tcg_temp_free(memop);
223
return true;
224
#else
225
return false;
226
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_h(DisasContext *ctx, arg_hlv_h *a)
227
#ifndef CONFIG_USER_ONLY
228
TCGv t0 = tcg_temp_new();
229
TCGv t1 = tcg_temp_new();
230
- TCGv mem_idx = tcg_temp_new();
231
- TCGv memop = tcg_temp_new();
232
+
233
+ check_access(ctx);
234
235
gen_get_gpr(t0, a->rs1);
236
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
237
- tcg_gen_movi_tl(memop, MO_TESW);
238
239
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
240
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
241
gen_set_gpr(a->rd, t1);
242
243
tcg_temp_free(t0);
244
tcg_temp_free(t1);
245
- tcg_temp_free(mem_idx);
246
- tcg_temp_free(memop);
247
return true;
248
#else
249
return false;
250
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_w(DisasContext *ctx, arg_hlv_w *a)
251
#ifndef CONFIG_USER_ONLY
252
TCGv t0 = tcg_temp_new();
253
TCGv t1 = tcg_temp_new();
254
- TCGv mem_idx = tcg_temp_new();
255
- TCGv memop = tcg_temp_new();
256
+
257
+ check_access(ctx);
258
259
gen_get_gpr(t0, a->rs1);
260
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
261
- tcg_gen_movi_tl(memop, MO_TESL);
262
263
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
264
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
265
gen_set_gpr(a->rd, t1);
266
267
tcg_temp_free(t0);
268
tcg_temp_free(t1);
269
- tcg_temp_free(mem_idx);
270
- tcg_temp_free(memop);
271
return true;
272
#else
273
return false;
274
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_bu(DisasContext *ctx, arg_hlv_bu *a)
275
#ifndef CONFIG_USER_ONLY
276
TCGv t0 = tcg_temp_new();
277
TCGv t1 = tcg_temp_new();
278
- TCGv mem_idx = tcg_temp_new();
279
- TCGv memop = tcg_temp_new();
280
+
281
+ check_access(ctx);
282
283
gen_get_gpr(t0, a->rs1);
284
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
285
- tcg_gen_movi_tl(memop, MO_UB);
286
287
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
288
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_UB);
289
gen_set_gpr(a->rd, t1);
290
291
tcg_temp_free(t0);
292
tcg_temp_free(t1);
293
- tcg_temp_free(mem_idx);
294
- tcg_temp_free(memop);
295
return true;
296
#else
297
return false;
298
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_hu(DisasContext *ctx, arg_hlv_hu *a)
299
#ifndef CONFIG_USER_ONLY
300
TCGv t0 = tcg_temp_new();
301
TCGv t1 = tcg_temp_new();
302
- TCGv mem_idx = tcg_temp_new();
303
- TCGv memop = tcg_temp_new();
304
305
- gen_get_gpr(t0, a->rs1);
306
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
307
- tcg_gen_movi_tl(memop, MO_TEUW);
308
+ check_access(ctx);
309
310
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
311
+ gen_get_gpr(t0, a->rs1);
312
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUW);
313
gen_set_gpr(a->rd, t1);
314
315
tcg_temp_free(t0);
316
tcg_temp_free(t1);
317
- tcg_temp_free(mem_idx);
318
- tcg_temp_free(memop);
319
return true;
320
#else
321
return false;
322
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_b(DisasContext *ctx, arg_hsv_b *a)
323
#ifndef CONFIG_USER_ONLY
324
TCGv t0 = tcg_temp_new();
325
TCGv dat = tcg_temp_new();
326
- TCGv mem_idx = tcg_temp_new();
327
- TCGv memop = tcg_temp_new();
328
+
329
+ check_access(ctx);
330
331
gen_get_gpr(t0, a->rs1);
332
gen_get_gpr(dat, a->rs2);
333
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
334
- tcg_gen_movi_tl(memop, MO_SB);
335
336
- gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
337
+ tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_SB);
338
339
tcg_temp_free(t0);
340
tcg_temp_free(dat);
341
- tcg_temp_free(mem_idx);
342
- tcg_temp_free(memop);
343
return true;
344
#else
345
return false;
346
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_h(DisasContext *ctx, arg_hsv_h *a)
347
#ifndef CONFIG_USER_ONLY
348
TCGv t0 = tcg_temp_new();
349
TCGv dat = tcg_temp_new();
350
- TCGv mem_idx = tcg_temp_new();
351
- TCGv memop = tcg_temp_new();
352
+
353
+ check_access(ctx);
354
355
gen_get_gpr(t0, a->rs1);
356
gen_get_gpr(dat, a->rs2);
357
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
358
- tcg_gen_movi_tl(memop, MO_TESW);
359
360
- gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
361
+ tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESW);
362
363
tcg_temp_free(t0);
364
tcg_temp_free(dat);
365
- tcg_temp_free(mem_idx);
366
- tcg_temp_free(memop);
367
return true;
368
#else
369
return false;
370
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_w(DisasContext *ctx, arg_hsv_w *a)
371
#ifndef CONFIG_USER_ONLY
372
TCGv t0 = tcg_temp_new();
373
TCGv dat = tcg_temp_new();
374
- TCGv mem_idx = tcg_temp_new();
375
- TCGv memop = tcg_temp_new();
376
+
377
+ check_access(ctx);
378
379
gen_get_gpr(t0, a->rs1);
380
gen_get_gpr(dat, a->rs2);
381
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
382
- tcg_gen_movi_tl(memop, MO_TESL);
383
384
- gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
385
+ tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TESL);
386
387
tcg_temp_free(t0);
388
tcg_temp_free(dat);
389
- tcg_temp_free(mem_idx);
390
- tcg_temp_free(memop);
391
return true;
392
#else
393
return false;
394
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_wu(DisasContext *ctx, arg_hlv_wu *a)
395
#ifndef CONFIG_USER_ONLY
396
TCGv t0 = tcg_temp_new();
397
TCGv t1 = tcg_temp_new();
398
- TCGv mem_idx = tcg_temp_new();
399
- TCGv memop = tcg_temp_new();
400
+
401
+ check_access(ctx);
402
403
gen_get_gpr(t0, a->rs1);
404
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
405
- tcg_gen_movi_tl(memop, MO_TEUL);
406
407
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
408
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEUL);
409
gen_set_gpr(a->rd, t1);
410
411
tcg_temp_free(t0);
412
tcg_temp_free(t1);
413
- tcg_temp_free(mem_idx);
414
- tcg_temp_free(memop);
415
return true;
416
#else
417
return false;
418
@@ -XXX,XX +XXX,XX @@ static bool trans_hlv_d(DisasContext *ctx, arg_hlv_d *a)
419
#ifndef CONFIG_USER_ONLY
420
TCGv t0 = tcg_temp_new();
421
TCGv t1 = tcg_temp_new();
422
- TCGv mem_idx = tcg_temp_new();
423
- TCGv memop = tcg_temp_new();
424
+
425
+ check_access(ctx);
426
427
gen_get_gpr(t0, a->rs1);
428
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
429
- tcg_gen_movi_tl(memop, MO_TEQ);
430
431
- gen_helper_hyp_load(t1, cpu_env, t0, mem_idx, memop);
432
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
433
gen_set_gpr(a->rd, t1);
434
435
tcg_temp_free(t0);
436
tcg_temp_free(t1);
437
- tcg_temp_free(mem_idx);
438
- tcg_temp_free(memop);
439
return true;
440
#else
441
return false;
442
@@ -XXX,XX +XXX,XX @@ static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
443
#ifndef CONFIG_USER_ONLY
444
TCGv t0 = tcg_temp_new();
445
TCGv dat = tcg_temp_new();
446
- TCGv mem_idx = tcg_temp_new();
447
- TCGv memop = tcg_temp_new();
448
+
449
+ check_access(ctx);
450
451
gen_get_gpr(t0, a->rs1);
452
gen_get_gpr(dat, a->rs2);
453
- tcg_gen_movi_tl(mem_idx, ctx->mem_idx);
454
- tcg_gen_movi_tl(memop, MO_TEQ);
455
456
- gen_helper_hyp_store(cpu_env, t0, dat, mem_idx, memop);
457
+ tcg_gen_qemu_st_tl(dat, t0, ctx->mem_idx | TB_FLAGS_PRIV_HYP_ACCESS_MASK, MO_TEQ);
458
459
tcg_temp_free(t0);
460
tcg_temp_free(dat);
461
- tcg_temp_free(mem_idx);
462
- tcg_temp_free(memop);
463
return true;
464
#else
465
return false;
466
--
182
--
467
2.29.2
183
2.45.1
468
469
1
Add a new MMU mode that includes the current virt mode.
1
From: Christoph Müllner <christoph.muellner@vrull.eu>
2
2
3
The th.sxstatus CSR can be used to identify available custom extension
4
on T-Head CPUs. The CSR is documented here:
5
https://github.com/T-head-Semi/thead-extension-spec/blob/master/xtheadsxstatus.adoc
6
7
An important property of this patch is that the th.sxstatus MAEE field
8
is not set (indicating that XTheadMae is not available).
9
XTheadMae is a memory attribute extension (similar to Svpbmt) which is
10
implemented in many T-Head CPUs (C906, C910, etc.) and utilizes bits
11
in PTEs that are marked as reserved. QEMU maintainers prefer to not
12
implement XTheadMae, so we need to give kernels a mechanism to identify
13
if XTheadMae is available in a system or not. And this patch introduces
14
this mechanism in QEMU in a way that's compatible with real HW
15
(i.e., probing the th.sxstatus.MAEE bit).
16
17
Further context can be found on the list:
18
https://lists.gnu.org/archive/html/qemu-devel/2024-02/msg00775.html
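
As a rough illustration of the probing mechanism mentioned above, guest S-mode
code could read the CSR and test the MAEE bit along the following lines. This
is an assumption-laden sketch, not code from this patch or from any Linux
series: only the CSR number (0x5c0) and bit positions come from the commit,
everything else is hypothetical, and it only runs on RISC-V.

```
#define CSR_TH_SXSTATUS   0x5c0
#define TH_SXSTATUS_MAEE  (1UL << 21)

/*
 * Read th.sxstatus. Only meaningful when mvendorid is T-Head's; real code
 * would also need an illegal-instruction fallback for other CPUs.
 */
static inline unsigned long read_th_sxstatus(void)
{
    unsigned long val;

    __asm__ volatile("csrr %0, 0x5c0" : "=r"(val));
    return val;
}

static inline int cpu_has_xtheadmae(void)
{
    /* QEMU leaves MAEE clear, so this returns 0 there. */
    return (read_th_sxstatus() & TH_SXSTATUS_MAEE) != 0;
}
```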
19
20
Reviewed-by: LIU Zhiwei <zhiwe_liu@linux.alibaba.com>
21
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
22
Signed-off-by: Christoph Müllner <christoph.muellner@vrull.eu>
23
Message-ID: <20240429073656.2486732-1-christoph.muellner@vrull.eu>
3
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Message-id: 4b301bc0ea36da962fc1605371b65019ac3073df.1604464950.git.alistair.francis@wdc.com
6
---
25
---
7
target/riscv/cpu-param.h | 11 ++++++++++-
26
MAINTAINERS | 1 +
8
target/riscv/cpu.h | 4 +++-
27
target/riscv/cpu.h | 3 ++
9
target/riscv/cpu_helper.c | 2 +-
28
target/riscv/cpu.c | 1 +
10
3 files changed, 14 insertions(+), 3 deletions(-)
29
target/riscv/th_csr.c | 79 ++++++++++++++++++++++++++++++++++++++++
30
target/riscv/meson.build | 1 +
31
5 files changed, 85 insertions(+)
32
create mode 100644 target/riscv/th_csr.c
11
33
12
diff --git a/target/riscv/cpu-param.h b/target/riscv/cpu-param.h
34
diff --git a/MAINTAINERS b/MAINTAINERS
13
index XXXXXXX..XXXXXXX 100644
35
index XXXXXXX..XXXXXXX 100644
14
--- a/target/riscv/cpu-param.h
36
--- a/MAINTAINERS
15
+++ b/target/riscv/cpu-param.h
37
+++ b/MAINTAINERS
16
@@ -XXX,XX +XXX,XX @@
38
@@ -XXX,XX +XXX,XX @@ L: qemu-riscv@nongnu.org
17
# define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
39
S: Supported
18
#endif
40
F: target/riscv/insn_trans/trans_xthead.c.inc
19
#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
41
F: target/riscv/xthead*.decode
20
-#define NB_MMU_MODES 4
42
+F: target/riscv/th_*
21
+/*
43
F: disas/riscv-xthead*
22
+ * The current MMU Modes are:
44
23
+ * - U mode 0b000
45
RISC-V XVentanaCondOps extension
24
+ * - S mode 0b001
25
+ * - M mode 0b011
26
+ * - U mode HLV/HLVX/HSV 0b100
27
+ * - S mode HLV/HLVX/HSV 0b101
28
+ * - M mode HLV/HLVX/HSV 0b111
29
+ */
30
+#define NB_MMU_MODES 8
31
32
#endif
33
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
46
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
34
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/cpu.h
48
--- a/target/riscv/cpu.h
36
+++ b/target/riscv/cpu.h
49
+++ b/target/riscv/cpu.h
37
@@ -XXX,XX +XXX,XX @@ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
50
@@ -XXX,XX +XXX,XX @@ target_ulong riscv_new_csr_seed(target_ulong new_value,
38
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
51
uint8_t satp_mode_max_from_map(uint32_t map);
39
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
52
const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit);
40
53
41
-#define TB_FLAGS_MMU_MASK 3
54
+/* Implemented in th_csr.c */
42
+#define TB_FLAGS_MMU_MASK 7
55
+void th_register_custom_csrs(RISCVCPU *cpu);
43
+#define TB_FLAGS_PRIV_MMU_MASK 3
56
+
44
+#define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2)
57
#endif /* RISCV_CPU_H */
45
#define TB_FLAGS_MSTATUS_FS MSTATUS_FS
58
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
46
47
typedef CPURISCVState CPUArchState;
48
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
49
index XXXXXXX..XXXXXXX 100644
59
index XXXXXXX..XXXXXXX 100644
50
--- a/target/riscv/cpu_helper.c
60
--- a/target/riscv/cpu.c
51
+++ b/target/riscv/cpu_helper.c
61
+++ b/target/riscv/cpu.c
52
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
62
@@ -XXX,XX +XXX,XX @@ static void rv64_thead_c906_cpu_init(Object *obj)
53
* (riscv_cpu_do_interrupt) is correct */
63
cpu->cfg.mvendorid = THEAD_VENDOR_ID;
54
MemTxResult res;
64
#ifndef CONFIG_USER_ONLY
55
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
65
set_satp_mode_max_supported(cpu, VM_1_10_SV39);
56
- int mode = mmu_idx;
66
+ th_register_custom_csrs(cpu);
57
+ int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
67
#endif
58
bool use_background = false;
68
59
69
/* inherited from parent obj via riscv_cpu_init() */
60
/*
70
diff --git a/target/riscv/th_csr.c b/target/riscv/th_csr.c
71
new file mode 100644
72
index XXXXXXX..XXXXXXX
73
--- /dev/null
74
+++ b/target/riscv/th_csr.c
75
@@ -XXX,XX +XXX,XX @@
76
+/*
77
+ * T-Head-specific CSRs.
78
+ *
79
+ * Copyright (c) 2024 VRULL GmbH
80
+ *
81
+ * This program is free software; you can redistribute it and/or modify it
82
+ * under the terms and conditions of the GNU General Public License,
83
+ * version 2 or later, as published by the Free Software Foundation.
84
+ *
85
+ * This program is distributed in the hope it will be useful, but WITHOUT
86
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
87
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
88
+ * more details.
89
+ *
90
+ * You should have received a copy of the GNU General Public License along with
91
+ * this program. If not, see <http://www.gnu.org/licenses/>.
92
+ */
93
+
94
+#include "qemu/osdep.h"
95
+#include "cpu.h"
96
+#include "cpu_vendorid.h"
97
+
98
+#define CSR_TH_SXSTATUS 0x5c0
99
+
100
+/* TH_SXSTATUS bits */
101
+#define TH_SXSTATUS_UCME BIT(16)
102
+#define TH_SXSTATUS_MAEE BIT(21)
103
+#define TH_SXSTATUS_THEADISAEE BIT(22)
104
+
105
+typedef struct {
106
+ int csrno;
107
+ int (*insertion_test)(RISCVCPU *cpu);
108
+ riscv_csr_operations csr_ops;
109
+} riscv_csr;
110
+
111
+static RISCVException smode(CPURISCVState *env, int csrno)
112
+{
113
+ if (riscv_has_ext(env, RVS)) {
114
+ return RISCV_EXCP_NONE;
115
+ }
116
+
117
+ return RISCV_EXCP_ILLEGAL_INST;
118
+}
119
+
120
+static int test_thead_mvendorid(RISCVCPU *cpu)
121
+{
122
+ if (cpu->cfg.mvendorid != THEAD_VENDOR_ID) {
123
+ return -1;
124
+ }
125
+
126
+ return 0;
127
+}
128
+
129
+static RISCVException read_th_sxstatus(CPURISCVState *env, int csrno,
130
+ target_ulong *val)
131
+{
132
+ /* We don't set MAEE here, because QEMU does not implement MAEE. */
133
+ *val = TH_SXSTATUS_UCME | TH_SXSTATUS_THEADISAEE;
134
+ return RISCV_EXCP_NONE;
135
+}
136
+
137
+static riscv_csr th_csr_list[] = {
138
+ {
139
+ .csrno = CSR_TH_SXSTATUS,
140
+ .insertion_test = test_thead_mvendorid,
141
+ .csr_ops = { "th.sxstatus", smode, read_th_sxstatus }
142
+ }
143
+};
144
+
145
+void th_register_custom_csrs(RISCVCPU *cpu)
146
+{
147
+ for (size_t i = 0; i < ARRAY_SIZE(th_csr_list); i++) {
148
+ int csrno = th_csr_list[i].csrno;
149
+ riscv_csr_operations *csr_ops = &th_csr_list[i].csr_ops;
150
+ if (!th_csr_list[i].insertion_test(cpu)) {
151
+ riscv_set_csr_ops(csrno, csr_ops);
152
+ }
153
+ }
154
+}
155
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
156
index XXXXXXX..XXXXXXX 100644
157
--- a/target/riscv/meson.build
158
+++ b/target/riscv/meson.build
159
@@ -XXX,XX +XXX,XX @@ riscv_system_ss.add(files(
160
'monitor.c',
161
'machine.c',
162
'pmu.c',
163
+ 'th_csr.c',
164
'time_helper.c',
165
'riscv-qmp-cmds.c',
166
))
61
--
167
--
62
2.29.2
168
2.45.1
63
169
64
170
New patch
1
From: Max Chou <max.chou@sifive.com>
1
2
3
According v spec 18.4, only the vfwcvt.f.f.v and vfncvt.f.f.w
4
instructions will be affected by Zvfhmin extension.
5
And the vfwcvt.f.f.v and vfncvt.f.f.w instructions only support the
6
conversions of
7
8
* From 1*SEW(16/32) to 2*SEW(32/64)
9
* From 2*SEW(32/64) to 1*SEW(16/32)
10
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Cc: qemu-stable <qemu-stable@nongnu.org>
14
Message-ID: <20240322092600.1198921-2-max.chou@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/insn_trans/trans_rvv.c.inc | 20 ++++++++++++++++++--
18
1 file changed, 18 insertions(+), 2 deletions(-)
19
20
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/insn_trans/trans_rvv.c.inc
23
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
24
@@ -XXX,XX +XXX,XX @@ static bool require_rvf(DisasContext *s)
25
}
26
}
27
28
+static bool require_rvfmin(DisasContext *s)
29
+{
30
+ if (s->mstatus_fs == EXT_STATUS_DISABLED) {
31
+ return false;
32
+ }
33
+
34
+ switch (s->sew) {
35
+ case MO_16:
36
+ return s->cfg_ptr->ext_zvfhmin;
37
+ case MO_32:
38
+ return s->cfg_ptr->ext_zve32f;
39
+ default:
40
+ return false;
41
+ }
42
+}
43
+
44
static bool require_scale_rvf(DisasContext *s)
45
{
46
if (s->mstatus_fs == EXT_STATUS_DISABLED) {
47
@@ -XXX,XX +XXX,XX @@ static bool require_scale_rvfmin(DisasContext *s)
48
}
49
50
switch (s->sew) {
51
- case MO_8:
52
- return s->cfg_ptr->ext_zvfhmin;
53
case MO_16:
54
return s->cfg_ptr->ext_zve32f;
55
case MO_32:
56
@@ -XXX,XX +XXX,XX @@ static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
57
static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
58
{
59
return opfv_widen_check(s, a) &&
60
+ require_rvfmin(s) &&
61
require_scale_rvfmin(s) &&
62
(s->sew != MO_8);
63
}
64
@@ -XXX,XX +XXX,XX @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
65
static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
66
{
67
return opfv_narrow_check(s, a) &&
68
+ require_rvfmin(s) &&
69
require_scale_rvfmin(s) &&
70
(s->sew != MO_8);
71
}
72
--
73
2.45.1
New patch
1
From: Max Chou <max.chou@sifive.com>
1
2
3
The require_scale_rvf function only checks the double width operator for
4
the vector floating point widen instructions, so most of the widen
5
checking functions need to add require_rvf for the single width operator.
6
7
The vfwcvt.f.x.v and vfwcvt.f.xu.v instructions convert single width
8
integer to double width float, so the opfxv_widen_check function doesn’t
9
need require_rvf for the single width operator(integer).
10
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Cc: qemu-stable <qemu-stable@nongnu.org>
14
Message-ID: <20240322092600.1198921-3-max.chou@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/insn_trans/trans_rvv.c.inc | 5 +++++
18
1 file changed, 5 insertions(+)
19
20
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/insn_trans/trans_rvv.c.inc
23
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
24
@@ -XXX,XX +XXX,XX @@ GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
25
static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
26
{
27
return require_rvv(s) &&
28
+ require_rvf(s) &&
29
require_scale_rvf(s) &&
30
(s->sew != MO_8) &&
31
vext_check_isa_ill(s) &&
32
@@ -XXX,XX +XXX,XX @@ GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
33
static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
34
{
35
return require_rvv(s) &&
36
+ require_rvf(s) &&
37
require_scale_rvf(s) &&
38
(s->sew != MO_8) &&
39
vext_check_isa_ill(s) &&
40
@@ -XXX,XX +XXX,XX @@ GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
41
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
42
{
43
return require_rvv(s) &&
44
+ require_rvf(s) &&
45
require_scale_rvf(s) &&
46
(s->sew != MO_8) &&
47
vext_check_isa_ill(s) &&
48
@@ -XXX,XX +XXX,XX @@ GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
49
static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
50
{
51
return require_rvv(s) &&
52
+ require_rvf(s) &&
53
require_scale_rvf(s) &&
54
(s->sew != MO_8) &&
55
vext_check_isa_ill(s) &&
56
@@ -XXX,XX +XXX,XX @@ GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
57
static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
58
{
59
return reduction_widen_check(s, a) &&
60
+ require_rvf(s) &&
61
require_scale_rvf(s) &&
62
(s->sew != MO_8);
63
}
64
--
65
2.45.1
66
67
New patch
1
From: Max Chou <max.chou@sifive.com>
1
2
3
The opfv_narrow_check needs to check the single width float operator by
4
require_rvf.
5
6
Signed-off-by: Max Chou <max.chou@sifive.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Cc: qemu-stable <qemu-stable@nongnu.org>
9
Message-ID: <20240322092600.1198921-4-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/insn_trans/trans_rvv.c.inc | 1 +
13
1 file changed, 1 insertion(+)
14
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
20
static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
21
{
22
return opfv_narrow_check(s, a) &&
23
+ require_rvf(s) &&
24
require_scale_rvf(s) &&
25
(s->sew != MO_8);
26
}
27
--
28
2.45.1
New patch
1
From: Max Chou <max.chou@sifive.com>
1
2
3
If the checking functions check both the single and double width
4
operators at the same time, then the single width operator checking
5
functions (require_rvf[min]) will check whether the SEW is 8.
6
7
Signed-off-by: Max Chou <max.chou@sifive.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Cc: qemu-stable <qemu-stable@nongnu.org>
10
Message-ID: <20240322092600.1198921-5-max.chou@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/insn_trans/trans_rvv.c.inc | 16 ++++------------
14
1 file changed, 4 insertions(+), 12 deletions(-)
15
16
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/insn_trans/trans_rvv.c.inc
19
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
20
@@ -XXX,XX +XXX,XX @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
21
return require_rvv(s) &&
22
require_rvf(s) &&
23
require_scale_rvf(s) &&
24
- (s->sew != MO_8) &&
25
vext_check_isa_ill(s) &&
26
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
27
}
28
@@ -XXX,XX +XXX,XX @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
29
return require_rvv(s) &&
30
require_rvf(s) &&
31
require_scale_rvf(s) &&
32
- (s->sew != MO_8) &&
33
vext_check_isa_ill(s) &&
34
vext_check_ds(s, a->rd, a->rs2, a->vm);
35
}
36
@@ -XXX,XX +XXX,XX @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
37
return require_rvv(s) &&
38
require_rvf(s) &&
39
require_scale_rvf(s) &&
40
- (s->sew != MO_8) &&
41
vext_check_isa_ill(s) &&
42
vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
43
}
44
@@ -XXX,XX +XXX,XX @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
45
return require_rvv(s) &&
46
require_rvf(s) &&
47
require_scale_rvf(s) &&
48
- (s->sew != MO_8) &&
49
vext_check_isa_ill(s) &&
50
vext_check_dd(s, a->rd, a->rs2, a->vm);
51
}
52
@@ -XXX,XX +XXX,XX @@ static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
53
{
54
return opfv_widen_check(s, a) &&
55
require_rvfmin(s) &&
56
- require_scale_rvfmin(s) &&
57
- (s->sew != MO_8);
58
+ require_scale_rvfmin(s);
59
}
60
61
#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
62
@@ -XXX,XX +XXX,XX @@ static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
63
{
64
return opfv_narrow_check(s, a) &&
65
require_rvfmin(s) &&
66
- require_scale_rvfmin(s) &&
67
- (s->sew != MO_8);
68
+ require_scale_rvfmin(s);
69
}
70
71
static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
72
{
73
return opfv_narrow_check(s, a) &&
74
require_rvf(s) &&
75
- require_scale_rvf(s) &&
76
- (s->sew != MO_8);
77
+ require_scale_rvf(s);
78
}
79
80
#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
81
@@ -XXX,XX +XXX,XX @@ static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
82
{
83
return reduction_widen_check(s, a) &&
84
require_rvf(s) &&
85
- require_scale_rvf(s) &&
86
- (s->sew != MO_8);
87
+ require_scale_rvf(s);
88
}
89
90
GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
91
--
92
2.45.1
1
The HS_TWO_STAGE flag is no longer required as the MMU index contains
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
the information about whether we are performing a two stage access.
3
2
3
raise_mmu_exception(), as is today, is prioritizing guest page faults by
4
checking first if virt_enabled && !first_stage, and then considering the
5
regular inst/load/store faults.
6
7
There's no mention in the spec about guest page fault being a higher
8
priority that PMP faults. In fact, privileged spec section 3.7.1 says:
9
10
"Attempting to fetch an instruction from a PMP region that does not have
11
execute permissions raises an instruction access-fault exception.
12
Attempting to execute a load or load-reserved instruction which accesses
13
a physical address within a PMP region without read permissions raises a
14
load access-fault exception. Attempting to execute a store,
15
store-conditional, or AMO instruction which accesses a physical address
16
within a PMP region without write permissions raises a store
17
access-fault exception."
18
19
So, in fact, we're doing it wrong - PMP faults should always be thrown,
20
regardless of also being a first or second stage fault.
21
22
The way riscv_cpu_tlb_fill() and get_physical_address() work is
23
adequate: a TRANSLATE_PMP_FAIL error is immediately reported and
24
reflected in the 'pmp_violation' flag. What we need is to change
25
raise_mmu_exception() to prioritize it.
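
A toy model of the priority being argued for, using hypothetical names (the
real code assigns RISCV_EXCP_* exception numbers inside raise_mmu_exception()
for the fetch, load and store cases separately):

```
#include <stdbool.h>

/* Hypothetical enum; the real code uses RISCV_EXCP_* values instead. */
typedef enum { TOY_ACCESS_FAULT, TOY_GUEST_PAGE_FAULT, TOY_PAGE_FAULT } toy_fault;

toy_fault classify_fault(bool pmp_violation, bool two_stage, bool first_stage)
{
    if (pmp_violation) {
        /* PMP always wins, no matter which translation stage faulted. */
        return TOY_ACCESS_FAULT;
    }
    if (two_stage && !first_stage) {
        /* Second-stage (guest physical) translation failed. */
        return TOY_GUEST_PAGE_FAULT;
    }
    return TOY_PAGE_FAULT;
}
```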
26
27
Reported-by: Joseph Chan <jchan@ventanamicro.com>
28
Fixes: 82d53adfbb ("target/riscv/cpu_helper.c: Invalid exception on MMU translation stage")
29
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
30
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
31
Message-ID: <20240413105929.7030-1-alexei.filippov@syntacore.com>
32
Cc: qemu-stable <qemu-stable@nongnu.org>
4
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
5
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
6
Message-id: f514b128b1ff0fb41c85f914cee18f905007a922.1604464950.git.alistair.francis@wdc.com
7
---
34
---
8
target/riscv/cpu.h | 3 +-
35
target/riscv/cpu_helper.c | 22 ++++++++++++----------
9
target/riscv/cpu_bits.h | 1 -
36
1 file changed, 12 insertions(+), 10 deletions(-)
10
target/riscv/cpu_helper.c | 60 ++++++++++++++++-----------------------
11
target/riscv/op_helper.c | 12 --------
12
4 files changed, 25 insertions(+), 51 deletions(-)
13
37
14
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
15
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/cpu.h
17
+++ b/target/riscv/cpu.h
18
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_virt_enabled(CPURISCVState *env);
19
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
20
bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env);
21
void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable);
22
-bool riscv_cpu_two_stage_lookup(CPURISCVState *env);
23
-void riscv_cpu_set_two_stage_lookup(CPURISCVState *env, bool enable);
24
+bool riscv_cpu_two_stage_lookup(int mmu_idx);
25
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
26
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
27
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
28
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/riscv/cpu_bits.h
31
+++ b/target/riscv/cpu_bits.h
32
@@ -XXX,XX +XXX,XX @@
33
* page table fault.
34
*/
35
#define FORCE_HS_EXCEP 2
36
-#define HS_TWO_STAGE 4
37
38
/* RV32 satp CSR field masks */
39
#define SATP32_MODE 0x80000000
40
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
38
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
41
index XXXXXXX..XXXXXXX 100644
39
index XXXXXXX..XXXXXXX 100644
42
--- a/target/riscv/cpu_helper.c
40
--- a/target/riscv/cpu_helper.c
43
+++ b/target/riscv/cpu_helper.c
41
+++ b/target/riscv/cpu_helper.c
44
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
45
env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
46
}
47
48
-bool riscv_cpu_two_stage_lookup(CPURISCVState *env)
49
+bool riscv_cpu_two_stage_lookup(int mmu_idx)
50
{
51
- if (!riscv_has_ext(env, RVH)) {
52
- return false;
53
- }
54
-
55
- return get_field(env->virt, HS_TWO_STAGE);
56
-}
57
-
58
-void riscv_cpu_set_two_stage_lookup(CPURISCVState *env, bool enable)
59
-{
60
- if (!riscv_has_ext(env, RVH)) {
61
- return;
62
- }
63
-
64
- env->virt = set_field(env->virt, HS_TWO_STAGE, enable);
65
+ return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
66
}
67
68
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
69
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
70
* was called. Background registers will be used if the guest has
71
* forced a two stage translation to be on (in HS or M mode).
72
*/
73
- if (riscv_cpu_two_stage_lookup(env) && access_type != MMU_INST_FETCH) {
74
+ if (!riscv_cpu_virt_enabled(env) && riscv_cpu_two_stage_lookup(mmu_idx)) {
75
use_background = true;
76
}
77
78
@@ -XXX,XX +XXX,XX @@ restart:
79
80
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
81
MMUAccessType access_type, bool pmp_violation,
82
- bool first_stage)
83
+ bool first_stage, bool two_stage)
84
{
85
CPUState *cs = env_cpu(env);
86
int page_fault_exceptions;
87
@@ -XXX,XX +XXX,XX @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
42
@@ -XXX,XX +XXX,XX @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
43
44
switch (access_type) {
45
case MMU_INST_FETCH:
46
- if (env->virt_enabled && !first_stage) {
47
+ if (pmp_violation) {
48
+ cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
49
+ } else if (env->virt_enabled && !first_stage) {
50
cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
51
} else {
52
- cs->exception_index = pmp_violation ?
53
- RISCV_EXCP_INST_ACCESS_FAULT : RISCV_EXCP_INST_PAGE_FAULT;
54
+ cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
88
}
55
}
89
break;
56
break;
90
case MMU_DATA_LOAD:
57
case MMU_DATA_LOAD:
91
- if ((riscv_cpu_virt_enabled(env) || riscv_cpu_two_stage_lookup(env)) &&
58
- if (two_stage && !first_stage) {
92
- !first_stage) {
59
+ if (pmp_violation) {
93
+ if (two_stage && !first_stage) {
60
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
61
+ } else if (two_stage && !first_stage) {
94
cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
62
cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
95
} else {
63
} else {
96
cs->exception_index = page_fault_exceptions ?
64
- cs->exception_index = pmp_violation ?
97
@@ -XXX,XX +XXX,XX @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
65
- RISCV_EXCP_LOAD_ACCESS_FAULT : RISCV_EXCP_LOAD_PAGE_FAULT;
66
+ cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
98
}
67
}
99
break;
68
break;
100
case MMU_DATA_STORE:
69
case MMU_DATA_STORE:
101
- if ((riscv_cpu_virt_enabled(env) || riscv_cpu_two_stage_lookup(env)) &&
70
- if (two_stage && !first_stage) {
102
- !first_stage) {
71
+ if (pmp_violation) {
103
+ if (two_stage && !first_stage) {
72
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
73
+ } else if (two_stage && !first_stage) {
104
cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
74
cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
105
} else {
75
} else {
106
cs->exception_index = page_fault_exceptions ?
76
- cs->exception_index = pmp_violation ?
107
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
77
- RISCV_EXCP_STORE_AMO_ACCESS_FAULT :
108
int prot, prot2;
78
- RISCV_EXCP_STORE_PAGE_FAULT;
109
bool pmp_violation = false;
79
+ cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
110
bool first_stage_error = true;
111
+ bool two_stage_lookup = false;
112
int ret = TRANSLATE_FAIL;
113
int mode = mmu_idx;
114
target_ulong tlb_size = 0;
115
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
116
access_type != MMU_INST_FETCH &&
117
get_field(env->mstatus, MSTATUS_MPRV) &&
118
get_field(env->mstatus, MSTATUS_MPV)) {
119
- riscv_cpu_set_two_stage_lookup(env, true);
120
+ two_stage_lookup = true;
121
}
122
123
if (riscv_cpu_virt_enabled(env) ||
124
- (riscv_cpu_two_stage_lookup(env) && access_type != MMU_INST_FETCH)) {
125
+ ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
126
+ access_type != MMU_INST_FETCH)) {
127
/* Two stage lookup */
128
ret = get_physical_address(env, &pa, &prot, address,
129
&env->guest_phys_fault_addr, access_type,
130
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
131
__func__, address, ret, pa, prot);
132
}
133
134
- /* We did the two stage lookup based on MPRV, unset the lookup */
135
- if (riscv_has_ext(env, RVH) && env->priv == PRV_M &&
136
- access_type != MMU_INST_FETCH &&
137
- get_field(env->mstatus, MSTATUS_MPRV) &&
138
- get_field(env->mstatus, MSTATUS_MPV)) {
139
- riscv_cpu_set_two_stage_lookup(env, false);
140
- }
141
-
142
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
143
(ret == TRANSLATE_SUCCESS) &&
144
!pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
145
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
146
} else if (probe) {
147
return false;
148
} else {
149
- raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error);
150
+ raise_mmu_exception(env, address, access_type, pmp_violation,
151
+ first_stage_error,
152
+ riscv_cpu_virt_enabled(env) ||
153
+ riscv_cpu_two_stage_lookup(mmu_idx));
154
riscv_raise_exception(env, cs->exception_index, retaddr);
155
}
156
157
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
158
/* handle the trap in S-mode */
159
if (riscv_has_ext(env, RVH)) {
160
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
161
+ bool two_stage_lookup = false;
162
+
163
+ if (env->priv == PRV_M ||
164
+ (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
165
+ (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
166
+ get_field(env->hstatus, HSTATUS_HU))) {
167
+ two_stage_lookup = true;
168
+ }
169
170
- if ((riscv_cpu_virt_enabled(env) ||
171
- riscv_cpu_two_stage_lookup(env)) && write_tval) {
172
+ if ((riscv_cpu_virt_enabled(env) || two_stage_lookup) && write_tval) {
173
/*
174
* If we are writing a guest virtual address to stval, set
175
* this to 1. If we are trapping to VS we will set this to 0
176
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
177
riscv_cpu_set_force_hs_excep(env, 0);
178
} else {
179
/* Trap into HS mode */
180
- if (!riscv_cpu_two_stage_lookup(env)) {
181
+ if (!two_stage_lookup) {
182
env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
183
riscv_cpu_virt_enabled(env));
184
}
185
- riscv_cpu_set_two_stage_lookup(env, false);
186
htval = env->guest_phys_fault_addr;
187
}
188
}
80
}
189
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
81
break;
190
index XXXXXXX..XXXXXXX 100644
82
default:
191
--- a/target/riscv/op_helper.c
192
+++ b/target/riscv/op_helper.c
193
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_load(CPURISCVState *env, target_ulong address,
194
target_ulong pte;
195
int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
196
197
- riscv_cpu_set_two_stage_lookup(env, true);
198
-
199
switch (memop) {
200
case MO_SB:
201
pte = cpu_ldsb_mmuidx_ra(env, address, mmu_idx, GETPC());
202
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_load(CPURISCVState *env, target_ulong address,
203
g_assert_not_reached();
204
}
205
206
- riscv_cpu_set_two_stage_lookup(env, false);
207
-
208
return pte;
209
}
210
211
@@ -XXX,XX +XXX,XX @@ void helper_hyp_store(CPURISCVState *env, target_ulong address,
212
get_field(env->hstatus, HSTATUS_HU))) {
213
int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
214
215
- riscv_cpu_set_two_stage_lookup(env, true);
216
-
217
switch (memop) {
218
case MO_SB:
219
case MO_UB:
220
@@ -XXX,XX +XXX,XX @@ void helper_hyp_store(CPURISCVState *env, target_ulong address,
221
g_assert_not_reached();
222
}
223
224
- riscv_cpu_set_two_stage_lookup(env, false);
225
-
226
return;
227
}
228
229
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
230
target_ulong pte;
231
int mmu_idx = cpu_mmu_index(env, false) | TB_FLAGS_PRIV_HYP_ACCESS_MASK;
232
233
- riscv_cpu_set_two_stage_lookup(env, true);
234
-
235
switch (memop) {
236
case MO_TEUW:
237
pte = cpu_lduw_mmuidx_ra(env, address, mmu_idx, GETPC());
238
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
239
g_assert_not_reached();
240
}
241
242
- riscv_cpu_set_two_stage_lookup(env, false);
243
-
244
return pte;
245
}
246
247
--
83
--
248
2.29.2
84
2.45.1
249
250
New patch
1
From: Alexei Filippov <alexei.filippov@syntacore.com>
1
2
3
Previous patch fixed the PMP priority in raise_mmu_exception() but we're still
4
setting mtval2 incorrectly. In riscv_cpu_tlb_fill(), after pmp check in 2 stage
5
translation part, mtval2 will be set in case of successes 2 stage translation but
6
failed pmp check.
7
8
In this case we gonna set mtval2 via env->guest_phys_fault_addr in context of
9
riscv_cpu_tlb_fill(), as this was a guest-page-fault, but it didn't and mtval2
10
should be zero, according to RISCV privileged spec sect. 9.4.4: When a guest
11
page-fault is taken into M-mode, mtval2 is written with either zero or guest
12
physical address that faulted, shifted by 2 bits. *For other traps, mtval2
13
is set to zero...*
14
15
Signed-off-by: Alexei Filippov <alexei.filippov@syntacore.com>
16
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
17
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
18
Message-ID: <20240503103052.6819-1-alexei.filippov@syntacore.com>
19
Cc: qemu-stable <qemu-stable@nongnu.org>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
22
target/riscv/cpu_helper.c | 12 ++++++------
23
1 file changed, 6 insertions(+), 6 deletions(-)
24
25
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/cpu_helper.c
28
+++ b/target/riscv/cpu_helper.c
29
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
30
__func__, pa, ret, prot_pmp, tlb_size);
31
32
prot &= prot_pmp;
33
- }
34
-
35
- if (ret != TRANSLATE_SUCCESS) {
36
+ } else {
37
/*
38
* Guest physical address translation failed, this is a HS
39
* level exception
40
*/
41
first_stage_error = false;
42
- env->guest_phys_fault_addr = (im_address |
43
- (address &
44
- (TARGET_PAGE_SIZE - 1))) >> 2;
45
+ if (ret != TRANSLATE_PMP_FAIL) {
46
+ env->guest_phys_fault_addr = (im_address |
47
+ (address &
48
+ (TARGET_PAGE_SIZE - 1))) >> 2;
49
+ }
50
}
51
}
52
} else {
53
--
54
2.45.1
New patch
1
From: Rob Bradford <rbradford@rivosinc.com>
1
2
3
This extension has now been ratified:
4
https://jira.riscv.org/browse/RVS-2006 so the "x-" prefix can be
5
removed.
6
7
Since this is now a ratified extension add it to the list of extensions
8
included in the "max" CPU variant.
9
10
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
11
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
15
Message-ID: <20240514110217.22516-1-rbradford@rivosinc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
target/riscv/cpu.c | 2 +-
19
target/riscv/tcg/tcg-cpu.c | 2 +-
20
2 files changed, 2 insertions(+), 2 deletions(-)
21
22
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu.c
25
+++ b/target/riscv/cpu.c
26
@@ -XXX,XX +XXX,XX @@ static const MISAExtInfo misa_ext_info_arr[] = {
27
MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
28
MISA_EXT_INFO(RVV, "v", "Vector operations"),
29
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
30
- MISA_EXT_INFO(RVB, "x-b", "Bit manipulation (Zba_Zbb_Zbs)")
31
+ MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
32
};
33
34
static void riscv_cpu_validate_misa_mxl(RISCVCPUClass *mcc)
35
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/riscv/tcg/tcg-cpu.c
38
+++ b/target/riscv/tcg/tcg-cpu.c
39
@@ -XXX,XX +XXX,XX @@ static void riscv_init_max_cpu_extensions(Object *obj)
40
const RISCVCPUMultiExtConfig *prop;
41
42
/* Enable RVG, RVJ and RVV that are disabled by default */
43
- riscv_cpu_set_misa_ext(env, env->misa_ext | RVG | RVJ | RVV);
44
+ riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
45
46
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
47
isa_ext_update_enabled(cpu, prop->offset, true);
48
--
49
2.45.1
New patch
1
From: Alistair Francis <alistair23@gmail.com>
1
2
3
When running the instruction
4
5
```
6
cbo.flush 0(x0)
7
```
8
9
QEMU would segfault.
10
11
The issue was in cpu_gpr[a->rs1] as QEMU does not have cpu_gpr[0]
12
allocated.
13
14
In order to fix this let's use the existing get_address()
15
helper. This also has the benefit of performing pointer mask
16
calculations on the address specified in rs1.
17
18
The pointer masking specificiation specifically states:
19
20
"""
21
Cache Management Operations: All instructions in Zicbom, Zicbop and Zicboz
22
"""
23
24
So this is the correct behaviour and we previously have been incorrectly
25
not masking the address.
26
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
28
Reported-by: Fabian Thomas <fabian.thomas@cispa.de>
29
Fixes: e05da09b7cfd ("target/riscv: implement Zicbom extension")
30
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
31
Cc: qemu-stable <qemu-stable@nongnu.org>
32
Message-ID: <20240514023910.301766-1-alistair.francis@wdc.com>
33
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
34
---
35
target/riscv/insn_trans/trans_rvzicbo.c.inc | 16 ++++++++++++----
36
1 file changed, 12 insertions(+), 4 deletions(-)
37
38
diff --git a/target/riscv/insn_trans/trans_rvzicbo.c.inc b/target/riscv/insn_trans/trans_rvzicbo.c.inc
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/insn_trans/trans_rvzicbo.c.inc
41
+++ b/target/riscv/insn_trans/trans_rvzicbo.c.inc
42
@@ -XXX,XX +XXX,XX @@
43
static bool trans_cbo_clean(DisasContext *ctx, arg_cbo_clean *a)
44
{
45
REQUIRE_ZICBOM(ctx);
46
- gen_helper_cbo_clean_flush(tcg_env, cpu_gpr[a->rs1]);
47
+ TCGv src = get_address(ctx, a->rs1, 0);
48
+
49
+ gen_helper_cbo_clean_flush(tcg_env, src);
50
return true;
51
}
52
53
static bool trans_cbo_flush(DisasContext *ctx, arg_cbo_flush *a)
54
{
55
REQUIRE_ZICBOM(ctx);
56
- gen_helper_cbo_clean_flush(tcg_env, cpu_gpr[a->rs1]);
57
+ TCGv src = get_address(ctx, a->rs1, 0);
58
+
59
+ gen_helper_cbo_clean_flush(tcg_env, src);
60
return true;
61
}
62
63
static bool trans_cbo_inval(DisasContext *ctx, arg_cbo_inval *a)
64
{
65
REQUIRE_ZICBOM(ctx);
66
- gen_helper_cbo_inval(tcg_env, cpu_gpr[a->rs1]);
67
+ TCGv src = get_address(ctx, a->rs1, 0);
68
+
69
+ gen_helper_cbo_inval(tcg_env, src);
70
return true;
71
}
72
73
static bool trans_cbo_zero(DisasContext *ctx, arg_cbo_zero *a)
74
{
75
REQUIRE_ZICBOZ(ctx);
76
- gen_helper_cbo_zero(tcg_env, cpu_gpr[a->rs1]);
77
+ TCGv src = get_address(ctx, a->rs1, 0);
78
+
79
+ gen_helper_cbo_zero(tcg_env, src);
80
return true;
81
}
82
--
83
2.45.1
New patch
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
2
3
In AIA spec, each hart (or each hart within a group) has a unique hart
4
number to locate the memory pages of interrupt files in the address
5
space. The number of bits required to represent any hart number is equal
6
to ceil(log2(hmax + 1)), where hmax is the largest hart number among
7
groups.
8
9
However, if the largest hart number among groups is a power of 2, QEMU
10
will pass an inaccurate hart-index-bit setting to Linux. For example, when
11
the guest OS has 4 harts, only ceil(log2(3 + 1)) = 2 bits are sufficient
12
to represent 4 harts, but we pass 3 to Linux. The code needs to be
13
updated to ensure accurate hart-index-bit settings.
14
15
Additionally, a Linux patch[1] is necessary to correctly recover the hart
16
index when the guest OS has only 1 hart, where the hart-index-bit is 0.
17
18
[1] https://lore.kernel.org/lkml/20240415064905.25184-1-yongxuan.wang@sifive.com/t/
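
The arithmetic can be sketched as follows; the helper name is illustrative and
this is not the KVM/QEMU code, which uses find_last_bit() on
max_hart_per_socket - 1 and special-cases a single hart.

```
#include <stdio.h>

/* ceil(log2(hmax + 1)), where hmax is the largest hart number. */
static unsigned hart_index_bits(unsigned long max_hart_per_socket)
{
    unsigned long hmax;
    unsigned bits = 0;

    if (max_hart_per_socket <= 1) {
        return 0;              /* a single hart needs no index bits */
    }
    hmax = max_hart_per_socket - 1;
    while (hmax >> bits) {
        bits++;
    }
    return bits;
}

int main(void)
{
    /*
     * 4 harts: hart numbers 0..3 fit in 2 bits. The previous code effectively
     * computed find_last_bit(4) + 1 == 3 and passed that to the kernel.
     */
    printf("%u %u %u\n", hart_index_bits(1), hart_index_bits(4), hart_index_bits(5));
    return 0;
}
```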
19
20
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
21
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
22
Cc: qemu-stable <qemu-stable@nongnu.org>
23
Message-ID: <20240515091129.28116-1-yongxuan.wang@sifive.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
25
---
26
target/riscv/kvm/kvm-cpu.c | 9 ++++++++-
27
1 file changed, 8 insertions(+), 1 deletion(-)
28
29
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/kvm/kvm-cpu.c
32
+++ b/target/riscv/kvm/kvm-cpu.c
33
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
34
}
35
}
36
37
- hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
38
+
39
+ if (max_hart_per_socket > 1) {
40
+ max_hart_per_socket--;
41
+ hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
42
+ } else {
43
+ hart_bits = 0;
44
+ }
45
+
46
ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
47
KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
48
&hart_bits, true, NULL);
49
--
50
2.45.1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

Commit 33a24910ae changed 'reg_width' to use 'vlenb', i.e. vector length
in bytes, when in this context we want 'reg_width' as the length in
bits.

Fix 'reg_width' back to the value in bits like 7cb59921c05a
("target/riscv/gdbstub.c: use 'vlenb' instead of shifting 'vlen'") set
beforehand.

While we're at it, rename 'reg_width' to 'bitsize' to provide a bit more
clarity about what the variable represents. 'bitsize' is also used in
riscv_gen_dynamic_csr_feature() with the same purpose, i.e. as an input to
gdb_feature_builder_append_reg().

Cc: Akihiko Odaki <akihiko.odaki@daynix.com>
Cc: Alex Bennée <alex.bennee@linaro.org>
Reported-by: Robin Dapp <rdapp.gcc@gmail.com>
Fixes: 33a24910ae ("target/riscv: Use GDBFeature for dynamic XML")
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Acked-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240517203054.880861-2-dbarboza@ventanamicro.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/gdbstub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -XXX,XX +XXX,XX @@ static GDBFeature *riscv_gen_dynamic_csr_feature(CPUState *cs, int base_reg)
 static GDBFeature *ricsv_gen_dynamic_vector_feature(CPUState *cs, int base_reg)
 {
     RISCVCPU *cpu = RISCV_CPU(cs);
-    int reg_width = cpu->cfg.vlenb;
+    int bitsize = cpu->cfg.vlenb << 3;
     GDBFeatureBuilder builder;
     int i;
 
@@ -XXX,XX +XXX,XX @@ static GDBFeature *ricsv_gen_dynamic_vector_feature(CPUState *cs, int base_reg)
 
     /* First define types and totals in a whole VL */
     for (i = 0; i < ARRAY_SIZE(vec_lanes); i++) {
-        int count = reg_width / vec_lanes[i].size;
+        int count = bitsize / vec_lanes[i].size;
         gdb_feature_builder_append_tag(
             &builder, "<vector id=\"%s\" type=\"%s\" count=\"%d\"/>",
             vec_lanes[i].id, vec_lanes[i].gdb_type, count);
@@ -XXX,XX +XXX,XX @@ static GDBFeature *ricsv_gen_dynamic_vector_feature(CPUState *cs, int base_reg)
     /* Define vector registers */
     for (i = 0; i < 32; i++) {
         gdb_feature_builder_append_reg(&builder, g_strdup_printf("v%d", i),
-                                       reg_width, i, "riscv_vector", "vector");
+                                       bitsize, i, "riscv_vector", "vector");
     }
 
     gdb_feature_builder_end(&builder);
--
2.45.1

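To make the bytes-versus-bits mix-up above concrete, consider a hart with VLEN=128, i.e. vlenb = 16. A minimal standalone sketch (the lane widths are illustrative stand-ins for vec_lanes[], not the actual table):

#include <stdio.h>

int main(void)
{
    int vlenb = 16;              /* vector register length in bytes (VLEN = 128) */
    int bitsize = vlenb << 3;    /* what the GDB XML needs: length in bits */

    /* Hypothetical lane widths in bits, standing in for vec_lanes[].size. */
    int lane_bits[] = { 8, 16, 32, 64 };
    int n = sizeof(lane_bits) / sizeof(lane_bits[0]);

    for (int i = 0; i < n; i++) {
        int wrong = vlenb / lane_bits[i];    /* bytes / bits: bogus lane count */
        int right = bitsize / lane_bits[i];  /* bits / bits: lanes per register */
        printf("%2d-bit lanes: wrong=%d right=%d\n", lane_bits[i], wrong, right);
    }
    return 0;
}

With vlenb alone, the 64-bit lane count even truncates to 0, which is one way the register layout reported to gdb went wrong.
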
From: Alistair Francis <alistair23@gmail.com>

Previously we only listed a single pmpcfg CSR and the first 16 pmpaddr
CSRs. This patch fixes this so that all 16 pmpcfg and all 64 pmpaddr
CSRs are part of the disassembly.

Reported-by: Eric DeVolder <eric_devolder@yahoo.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Fixes: ea10325917 ("RISC-V Disassembler")
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Cc: qemu-stable <qemu-stable@nongnu.org>
Message-ID: <20240514051615.330979-1-alistair.francis@wdc.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 disas/riscv.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 64 insertions(+), 1 deletion(-)

diff --git a/disas/riscv.c b/disas/riscv.c
index XXXXXXX..XXXXXXX 100644
--- a/disas/riscv.c
+++ b/disas/riscv.c
@@ -XXX,XX +XXX,XX @@ static const char *csr_name(int csrno)
     case 0x0383: return "mibound";
     case 0x0384: return "mdbase";
     case 0x0385: return "mdbound";
-    case 0x03a0: return "pmpcfg3";
+    case 0x03a0: return "pmpcfg0";
+    case 0x03a1: return "pmpcfg1";
+    case 0x03a2: return "pmpcfg2";
+    case 0x03a3: return "pmpcfg3";
+    case 0x03a4: return "pmpcfg4";
+    case 0x03a5: return "pmpcfg5";
+    case 0x03a6: return "pmpcfg6";
+    case 0x03a7: return "pmpcfg7";
+    case 0x03a8: return "pmpcfg8";
+    case 0x03a9: return "pmpcfg9";
+    case 0x03aa: return "pmpcfg10";
+    case 0x03ab: return "pmpcfg11";
+    case 0x03ac: return "pmpcfg12";
+    case 0x03ad: return "pmpcfg13";
+    case 0x03ae: return "pmpcfg14";
+    case 0x03af: return "pmpcfg15";
     case 0x03b0: return "pmpaddr0";
     case 0x03b1: return "pmpaddr1";
     case 0x03b2: return "pmpaddr2";
@@ -XXX,XX +XXX,XX @@ static const char *csr_name(int csrno)
     case 0x03bd: return "pmpaddr13";
     case 0x03be: return "pmpaddr14";
     case 0x03bf: return "pmpaddr15";
+    case 0x03c0: return "pmpaddr16";
+    case 0x03c1: return "pmpaddr17";
+    case 0x03c2: return "pmpaddr18";
+    case 0x03c3: return "pmpaddr19";
+    case 0x03c4: return "pmpaddr20";
+    case 0x03c5: return "pmpaddr21";
+    case 0x03c6: return "pmpaddr22";
+    case 0x03c7: return "pmpaddr23";
+    case 0x03c8: return "pmpaddr24";
+    case 0x03c9: return "pmpaddr25";
+    case 0x03ca: return "pmpaddr26";
+    case 0x03cb: return "pmpaddr27";
+    case 0x03cc: return "pmpaddr28";
+    case 0x03cd: return "pmpaddr29";
+    case 0x03ce: return "pmpaddr30";
+    case 0x03cf: return "pmpaddr31";
+    case 0x03d0: return "pmpaddr32";
+    case 0x03d1: return "pmpaddr33";
+    case 0x03d2: return "pmpaddr34";
+    case 0x03d3: return "pmpaddr35";
+    case 0x03d4: return "pmpaddr36";
+    case 0x03d5: return "pmpaddr37";
+    case 0x03d6: return "pmpaddr38";
+    case 0x03d7: return "pmpaddr39";
+    case 0x03d8: return "pmpaddr40";
+    case 0x03d9: return "pmpaddr41";
+    case 0x03da: return "pmpaddr42";
+    case 0x03db: return "pmpaddr43";
+    case 0x03dc: return "pmpaddr44";
+    case 0x03dd: return "pmpaddr45";
+    case 0x03de: return "pmpaddr46";
+    case 0x03df: return "pmpaddr47";
+    case 0x03e0: return "pmpaddr48";
+    case 0x03e1: return "pmpaddr49";
+    case 0x03e2: return "pmpaddr50";
+    case 0x03e3: return "pmpaddr51";
+    case 0x03e4: return "pmpaddr52";
+    case 0x03e5: return "pmpaddr53";
+    case 0x03e6: return "pmpaddr54";
+    case 0x03e7: return "pmpaddr55";
+    case 0x03e8: return "pmpaddr56";
+    case 0x03e9: return "pmpaddr57";
+    case 0x03ea: return "pmpaddr58";
+    case 0x03eb: return "pmpaddr59";
+    case 0x03ec: return "pmpaddr60";
+    case 0x03ed: return "pmpaddr61";
+    case 0x03ee: return "pmpaddr62";
+    case 0x03ef: return "pmpaddr63";
     case 0x0780: return "mtohost";
     case 0x0781: return "mfromhost";
     case 0x0782: return "mreset";
--
2.45.1

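Since the PMP CSRs sit in two contiguous ranges (pmpcfg0-15 at 0x3a0-0x3af, pmpaddr0-63 at 0x3b0-0x3ef), the table added above is easy to cross-check mechanically. A small verification sketch, not how disas/riscv.c itself is organised:

#include <stdio.h>

int main(void)
{
    /* pmpcfg0..pmpcfg15 occupy CSR numbers 0x3a0..0x3af */
    for (int i = 0; i < 16; i++) {
        printf("    case 0x%04x: return \"pmpcfg%d\";\n", 0x3a0 + i, i);
    }
    /* pmpaddr0..pmpaddr63 occupy CSR numbers 0x3b0..0x3ef */
    for (int i = 0; i < 64; i++) {
        printf("    case 0x%04x: return \"pmpaddr%d\";\n", 0x3b0 + i, i);
    }
    return 0;
}

Its output can be diffed against the case labels in the patch to confirm nothing in the two ranges was skipped.
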
From: Yu-Ming Chang <yumin686@andestech.com>

Both CSRRS and CSRRC always read the addressed CSR and cause any read side
effects regardless of rs1 and rd fields. Note that if rs1 specifies a register
holding a zero value other than x0, the instruction will still attempt to write
the unmodified value back to the CSR and will cause any attendant side effects.

So if CSRRS or CSRRC tries to write a read-only CSR with rs1 which specifies
a register holding a zero value, an illegal instruction exception should be
raised.

Signed-off-by: Yu-Ming Chang <yumin686@andestech.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-ID: <20240403070823.80897-1-yumin686@andestech.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
---
 target/riscv/cpu.h       |  4 ++++
 target/riscv/csr.c       | 51 ++++++++++++++++++++++++++++++++++++----
 target/riscv/op_helper.c |  6 ++---
 3 files changed, 53 insertions(+), 8 deletions(-)

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
 void riscv_cpu_update_mask(CPURISCVState *env);
 bool riscv_cpu_is_32bit(RISCVCPU *cpu);
 
+RISCVException riscv_csrr(CPURISCVState *env, int csrno,
+                          target_ulong *ret_value);
 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                            target_ulong *ret_value,
                            target_ulong new_value, target_ulong write_mask);
@@ -XXX,XX +XXX,XX @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno,
                                           target_ulong new_value,
                                           target_ulong write_mask);
 
+RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
+                               Int128 *ret_value);
 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                 Int128 *ret_value,
                                 Int128 new_value, Int128 write_mask);
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_seed(CPURISCVState *env, int csrno,
 
 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                                int csrno,
-                                               bool write_mask)
+                                               bool write)
 {
     /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
     bool read_only = get_field(csrno, 0xC00) == 3;
@@ -XXX,XX +XXX,XX @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
     }
 
     /* read / write check */
-    if (write_mask && read_only) {
+    if (write && read_only) {
         return RISCV_EXCP_ILLEGAL_INST;
     }
 
@@ -XXX,XX +XXX,XX @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+RISCVException riscv_csrr(CPURISCVState *env, int csrno,
+                          target_ulong *ret_value)
+{
+    RISCVException ret = riscv_csrrw_check(env, csrno, false);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    return riscv_csrrw_do64(env, csrno, ret_value, 0, 0);
+}
+
 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                            target_ulong *ret_value,
                            target_ulong new_value, target_ulong write_mask)
 {
-    RISCVException ret = riscv_csrrw_check(env, csrno, write_mask);
+    RISCVException ret = riscv_csrrw_check(env, csrno, true);
     if (ret != RISCV_EXCP_NONE) {
         return ret;
     }
@@ -XXX,XX +XXX,XX @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno,
+                               Int128 *ret_value)
+{
+    RISCVException ret;
+
+    ret = riscv_csrrw_check(env, csrno, false);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    if (csr_ops[csrno].read128) {
+        return riscv_csrrw_do128(env, csrno, ret_value,
+                                 int128_zero(), int128_zero());
+    }
+
+    /*
+     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
+     * at all defined.
+     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
+     * significant), for those, this fallback is correctly handling the
+     * accesses
+     */
+    target_ulong old_value;
+    ret = riscv_csrrw_do64(env, csrno, &old_value,
+                           (target_ulong)0,
+                           (target_ulong)0);
+    if (ret == RISCV_EXCP_NONE && ret_value) {
+        *ret_value = int128_make64(old_value);
+    }
+    return ret;
+}
+
 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                 Int128 *ret_value,
                                 Int128 new_value, Int128 write_mask)
 {
     RISCVException ret;
 
-    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask));
+    ret = riscv_csrrw_check(env, csrno, true);
     if (ret != RISCV_EXCP_NONE) {
         return ret;
     }
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index XXXXXXX..XXXXXXX 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrr(CPURISCVState *env, int csr)
     }
 
     target_ulong val = 0;
-    RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0);
+    RISCVException ret = riscv_csrr(env, csr, &val);
 
     if (ret != RISCV_EXCP_NONE) {
         riscv_raise_exception(env, ret, GETPC());
@@ -XXX,XX +XXX,XX @@ target_ulong helper_csrrw(CPURISCVState *env, int csr,
 target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
 {
     Int128 rv = int128_zero();
-    RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
-                                          int128_zero(),
-                                          int128_zero());
+    RISCVException ret = riscv_csrr_i128(env, csr, &rv);
 
     if (ret != RISCV_EXCP_NONE) {
         riscv_raise_exception(env, ret, GETPC());
--
2.45.1

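The behavioural rule introduced above boils down to this: the read-only check must key off whether the instruction performs a write at all, not off whether the write mask happens to be zero. A standalone sketch of just that rule (csr_is_read_only() and csr_access_traps() are invented names for illustration, not the QEMU helpers, and privilege-level checks are left out):

#include <stdbool.h>
#include <stdio.h>

/* Bits [11:10] of the CSR number encode the access mode; 0b11 is read-only. */
static bool csr_is_read_only(int csrno)
{
    return ((csrno >> 10) & 0x3) == 0x3;
}

/*
 * is_write is true for CSRRW, and for CSRRS/CSRRC whenever rs1 != x0,
 * even if the register named by rs1 currently holds zero.
 */
static bool csr_access_traps(int csrno, bool is_write)
{
    return is_write && csr_is_read_only(csrno);
}

int main(void)
{
    int cycle = 0xc00;      /* read-only counter CSR */
    int mscratch = 0x340;   /* read/write CSR */

    /* csrr t0, cycle: pure read, no trap */
    printf("read cycle:           %s\n", csr_access_traps(cycle, false) ? "trap" : "ok");
    /* csrrs t0, cycle, t1 with t1 == 0: still a write attempt, traps */
    printf("csrrs cycle, rs1!=x0: %s\n", csr_access_traps(cycle, true) ? "trap" : "ok");
    /* csrrs t0, mscratch, t1: writable CSR, no trap */
    printf("csrrs mscratch:       %s\n", csr_access_traps(mscratch, true) ? "trap" : "ok");
    return 0;
}
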