1
The following changes since commit 4d5d933bbc7cc52f6cc6b9021f91fa06266222d5:
1
The following changes since commit 1dae461a913f9da88df05de6e2020d3134356f2e:
2
2
3
Merge tag 'pull-xenfv-20250116' of git://git.infradead.org/users/dwmw2/qemu into staging (2025-01-16 09:03:43 -0500)
3
Update version for v10.0.0-rc0 release (2025-03-18 10:18:14 -0400)
4
4
5
are available in the Git repository at:
5
are available in the Git repository at:
6
6
7
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20250117-2
7
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20250319
8
8
9
for you to fetch changes up to f195d4af294f76762407e2273a564950a8f42db6:
9
for you to fetch changes up to ffe4db11f8aed79c7ec7d3ebd92674a1cfab4fe7:
10
10
11
hw/char/riscv_htif: Convert HTIF_DEBUG() to trace events (2025-01-17 15:52:56 +1000)
11
target/riscv: Add check for 16-bit aligned PC for different priv versions. (2025-03-19 17:11:46 +1000)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Second RISC-V PR for 10.0
14
Fourth RISC-V PR for 10.0
15
15
16
* Reduce the overhead for simple RISC-V vector unit-stride loads and stores
16
* Fix broken emulation link
17
* Add V bit to GDB priv reg
17
* Optimize the memory probing for vector fault-only-first loads
18
* Add 'sha' support
18
* Fix access permission checks for CSR_SSP
19
* Add traces for exceptions in user mode
19
* Fixes a bug against `ssamoswap` behavior in M-mode
20
* Update Pointer Masking to Zjpm v1.0
20
* Fix IOMMU process directory table walk
21
* Add Smrnmi support
21
* Fix OVERFLOW_BEFORE_WIDEN in rmw_sctrdepth()
22
* Fix timebase-frequency when using KVM acceleration
22
* Enhance VSTART and VL checks for vector instructions
23
* Add RISC-V Counter delegation ISA extension support
23
* Fix handling of cpu mask in riscv_hwprobe syscall
24
* Add support for Smdbltrp and Ssdbltrp extensions
24
* Add check for 16-bit aligned PC for different priv versions
25
* Introduce a translation tag for the IOMMU page table cache
26
* Support Supm and Sspm as part of Zjpm v1.0
27
* Convert htif debug prints to trace events
28
25
29
----------------------------------------------------------------
26
----------------------------------------------------------------
30
Alexey Baturo (8):
27
Chao Liu (2):
31
target/riscv: Remove obsolete pointer masking extension code.
28
target/riscv: refactor VSTART_CHECK_EARLY_EXIT() to accept vl as a parameter
32
target/riscv: Add new CSR fields for S{sn, mn, m}pm extensions as part of Zjpm v1.0
29
target/riscv: fix handling of nop for vstart >= vl in some vector instruction
33
target/riscv: Add helper functions to calculate current number of masked bits for pointer masking
34
target/riscv: Add pointer masking tb flags
35
target/riscv: Update address modify functions to take into account pointer masking
36
target/riscv: Apply pointer masking for virtualized memory accesses
37
target/riscv: Enable updates for pointer masking variables and thus enable pointer masking extension
38
target/riscv: Support Supm and Sspm as part of Zjpm v1.0
39
30
40
Atish Patra (5):
31
Daniel Henrique Barboza (1):
41
target/riscv: Enable S*stateen bits for AIA
32
target/riscv/csr.c: fix OVERFLOW_BEFORE_WIDEN in rmw_sctrdepth()
42
target/riscv: Add properties for counter delegation ISA extensions
43
target/riscv: Invoke pmu init after feature enable
44
target/riscv: Add implied rule for counter delegation extensions
45
target/riscv: Add configuration for S[m|s]csrind, Smcdeleg/Ssccfg
46
33
47
Clément Léger (9):
34
Deepak Gupta (2):
48
target/riscv: Fix henvcfg potentially containing stale bits
35
target/riscv: fix access permission checks for CSR_SSP
49
target/riscv: Add Ssdbltrp CSRs handling
36
target/riscv: fixes a bug against `ssamoswap` behavior in M-mode
50
target/riscv: Implement Ssdbltrp sret, mret and mnret behavior
51
target/riscv: Implement Ssdbltrp exception handling
52
target/riscv: Add Ssdbltrp ISA extension enable switch
53
target/riscv: Add Smdbltrp CSRs handling
54
target/riscv: Implement Smdbltrp sret, mret and mnret behavior
55
target/riscv: Implement Smdbltrp behavior
56
target/riscv: Add Smdbltrp ISA extension enable switch
57
58
Craig Blackmore (2):
59
target/riscv: rvv: fix typo in vext continuous ldst function names
60
target/riscv: rvv: speed up small unit-stride loads and stores
61
62
Daniel Henrique Barboza (9):
63
target/riscv: add shcounterenw
64
target/riscv: add shvstvala
65
target/riscv: add shtvala
66
target/riscv: add shvstvecd
67
target/riscv: add shvsatpa
68
target/riscv: add shgatpa
69
target/riscv/tcg: add sha
70
target/riscv: use RISCVException enum in exception helpers
71
target/riscv: add trace in riscv_raise_exception()
72
73
Frank Chang (1):
74
target/riscv: Add Zicfilp support for Smrnmi
75
37
76
Jason Chien (1):
38
Jason Chien (1):
77
hw/riscv/riscv-iommu.c: Introduce a translation tag for the page table cache
39
hw/riscv/riscv-iommu: Fix process directory table walk
78
40
79
Kaiwen Xue (6):
41
Paolo Savini (1):
80
target/riscv: Add properties for Indirect CSR Access extension
42
optimize the memory probing for vector fault-only-first loads.
81
target/riscv: Decouple AIA processing from xiselect and xireg
82
target/riscv: Support generic CSR indirect access
83
target/riscv: Add counter delegation definitions
84
target/riscv: Add select value range check for counter delegation
85
target/riscv: Add counter delegation/configuration support
86
43
87
Philippe Mathieu-Daudé (3):
44
Richard Henderson (1):
88
target/riscv: Have kvm_riscv_get_timebase_frequency() take RISCVCPU cpu
45
linux-user/riscv: Fix handling of cpu mask in riscv_hwprobe syscall
89
hw/riscv/virt: Remove unnecessary use of &first_cpu
90
hw/char/riscv_htif: Convert HTIF_DEBUG() to trace events
91
46
92
Tommy Wu (5):
47
Santiago Monserrat Campanello (1):
93
target/riscv: Add 'ext_smrnmi' in the RISCVCPUConfig
48
docs/about/emulation: Fix broken link
94
target/riscv: Add Smrnmi CSRs
95
target/riscv: Handle Smrnmi interrupt and exception
96
target/riscv: Add Smrnmi mnret instruction
97
target/riscv: Add Smrnmi cpu extension
98
49
99
Yanfeng Liu (1):
50
Yu-Ming Chang (1):
100
riscv/gdbstub: add V bit to priv reg
51
target/riscv: Add check for 16-bit aligned PC for different priv versions.
101
52
102
include/hw/riscv/riscv_hart.h | 4 +
53
docs/about/emulation.rst | 2 +-
103
target/riscv/cpu.h | 65 +-
54
hw/riscv/riscv-iommu-bits.h | 6 +-
104
target/riscv/cpu_bits.h | 157 ++-
55
target/riscv/cpu.h | 12 ++
105
target/riscv/cpu_cfg.h | 13 +
56
target/riscv/vector_internals.h | 12 +-
106
target/riscv/helper.h | 1 +
57
hw/riscv/riscv-iommu.c | 4 +-
107
target/riscv/internals.h | 54 +
58
linux-user/syscall.c | 55 ++++----
108
target/riscv/kvm/kvm_riscv.h | 4 +-
59
target/riscv/csr.c | 7 +-
109
target/riscv/pmp.h | 1 +
60
target/riscv/op_helper.c | 8 +-
110
target/riscv/insn32.decode | 3 +
61
target/riscv/translate.c | 4 +-
111
hw/char/riscv_htif.c | 15 +-
62
target/riscv/vcrypto_helper.c | 32 ++---
112
hw/riscv/riscv-iommu.c | 205 +++-
63
target/riscv/vector_helper.c | 186 ++++++++++++++------------
113
hw/riscv/riscv_hart.c | 41 +
64
target/riscv/vector_internals.c | 4 +-
114
hw/riscv/virt.c | 2 +-
65
target/riscv/insn_trans/trans_rvi.c.inc | 8 +-
115
target/riscv/cpu.c | 97 +-
66
target/riscv/insn_trans/trans_rvzicfiss.c.inc | 17 +++
116
target/riscv/cpu_helper.c | 311 +++++-
67
14 files changed, 214 insertions(+), 143 deletions(-)
117
target/riscv/csr.c | 1257 +++++++++++++++++-------
118
target/riscv/gdbstub.c | 23 +-
119
target/riscv/kvm/kvm-cpu.c | 4 +-
120
target/riscv/machine.c | 18 +-
121
target/riscv/op_helper.c | 126 ++-
122
target/riscv/pmp.c | 14 +-
123
target/riscv/tcg/tcg-cpu.c | 50 +-
124
target/riscv/translate.c | 49 +-
125
target/riscv/vector_helper.c | 31 +-
126
target/riscv/insn_trans/trans_privileged.c.inc | 20 +
127
hw/char/trace-events | 4 +
128
target/riscv/trace-events | 3 +
129
tests/data/acpi/riscv64/virt/RHCT | Bin 332 -> 390 bytes
130
28 files changed, 1852 insertions(+), 720 deletions(-)
131
diff view generated by jsdifflib
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
1
From: Santiago Monserrat Campanello <santimonserr@gmail.com>
2
2
3
virt_machine_init() creates the HARTs vCPUs, then later
3
semihosting link to risc-v changed
4
virt_machine_done() calls create_fdt_sockets(), so the
5
latter has access to the first vCPU via:
6
4
7
RISCVVirtState {
5
Signed-off-by: Santiago Monserrat Campanello <santimonserr@gmail.com>
8
RISCVHartArrayState {
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2717
9
RISCVCPU *harts;
10
...
11
12
} soc[VIRT_SOCKETS_MAX];
13
...
14
15
} s;
16
17
Directly use that instead of the &first_cpu global.
18
19
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
20
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
21
Message-ID: <20250112231344.34632-3-philmd@linaro.org>
8
Reviewed-by: Thomas Huth <thuth@redhat.com>
9
Message-ID: <20250305102632.91376-1-santimonserr@gmail.com>
22
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
23
---
11
---
24
hw/riscv/virt.c | 2 +-
12
docs/about/emulation.rst | 2 +-
25
1 file changed, 1 insertion(+), 1 deletion(-)
13
1 file changed, 1 insertion(+), 1 deletion(-)
26
14
27
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
15
diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst
28
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
29
--- a/hw/riscv/virt.c
17
--- a/docs/about/emulation.rst
30
+++ b/hw/riscv/virt.c
18
+++ b/docs/about/emulation.rst
31
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
19
@@ -XXX,XX +XXX,XX @@ for that architecture.
32
qemu_fdt_add_subnode(ms->fdt, "/cpus");
20
- Unified Hosting Interface (MD01069)
33
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency",
21
* - RISC-V
34
kvm_enabled() ?
22
- System and User-mode
35
- kvm_riscv_get_timebase_frequency(RISCV_CPU(first_cpu)) :
23
- - https://github.com/riscv/riscv-semihosting-spec/blob/main/riscv-semihosting-spec.adoc
36
+ kvm_riscv_get_timebase_frequency(&s->soc->harts[0]) :
24
+ - https://github.com/riscv-non-isa/riscv-semihosting/blob/main/riscv-semihosting.adoc
37
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ);
25
* - Xtensa
38
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
26
- System
39
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
27
- Tensilica ISS SIMCALL
40
--
28
--
41
2.47.1
29
2.48.1
42
43
diff view generated by jsdifflib
1
From: Alexey Baturo <baturo.alexey@gmail.com>
1
From: Paolo Savini <paolo.savini@embecosm.com>
2
2
3
Zjpm extension is finally ratified. And it's much simplier compared to the experimental one.
3
Fault-only-first loads in the RISC-V vector extension need to update
4
The newer version doesn't allow to specify custom mask or base for pointer masking.
4
the vl with the element index that causes an exception.
5
Instead it allows only certain options for masking top bits.
5
In order to ensure this the emulation of this instruction used to probe the
6
memory covered by the load operation with a loop that iterated over each element
7
so that when a flag was raised it was possible to set the vl to the
8
corresponding element index.
9
This loop was executed every time whether an exception happened or not.
6
10
7
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
11
This commit removes the per element memory probing from the main execution path
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
and adds a broad memory probing first. If this probing raises any flag that is
9
Message-ID: <20250106102346.1100149-2-baturo.alexey@gmail.com>
13
not a watchpoint flag (that per standard is allowed by this instruction) we
14
proceed with the per element probing to find the index of the element causing
15
the exception and set vl to such index.
16
17
Signed-off-by: Paolo Savini <paolo.savini@embecosm.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
Message-ID: <20250221155320.59159-2-paolo.savini@embecosm.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
21
---
12
target/riscv/cpu.h | 33 +---
22
target/riscv/vector_helper.c | 103 ++++++++++++++++++++---------------
13
target/riscv/cpu_bits.h | 87 ----------
23
1 file changed, 58 insertions(+), 45 deletions(-)
14
target/riscv/cpu.c | 13 +-
15
target/riscv/cpu_helper.c | 52 ------
16
target/riscv/csr.c | 326 -----------------------------------
17
target/riscv/machine.c | 17 +-
18
target/riscv/tcg/tcg-cpu.c | 5 +-
19
target/riscv/translate.c | 28 +--
20
target/riscv/vector_helper.c | 2 +-
21
9 files changed, 19 insertions(+), 544 deletions(-)
22
24
23
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/riscv/cpu.h
26
+++ b/target/riscv/cpu.h
27
@@ -XXX,XX +XXX,XX @@ typedef struct CPUArchState CPURISCVState;
28
#define RVS RV('S')
29
#define RVU RV('U')
30
#define RVH RV('H')
31
-#define RVJ RV('J')
32
#define RVG RV('G')
33
#define RVB RV('B')
34
35
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
36
/* True if in debugger mode. */
37
bool debugger;
38
39
- /*
40
- * CSRs for PointerMasking extension
41
- */
42
- target_ulong mmte;
43
- target_ulong mpmmask;
44
- target_ulong mpmbase;
45
- target_ulong spmmask;
46
- target_ulong spmbase;
47
- target_ulong upmmask;
48
- target_ulong upmbase;
49
-
50
uint64_t mstateen[SMSTATEEN_MAX_COUNT];
51
uint64_t hstateen[SMSTATEEN_MAX_COUNT];
52
uint64_t sstateen[SMSTATEEN_MAX_COUNT];
53
uint64_t henvcfg;
54
#endif
55
- target_ulong cur_pmmask;
56
- target_ulong cur_pmbase;
57
58
/* Fields from here on are preserved across CPU reset. */
59
QEMUTimer *stimer; /* Internal timer for S-mode interrupt */
60
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, XL, 16, 2)
61
/* If PointerMasking should be applied */
62
FIELD(TB_FLAGS, PM_MASK_ENABLED, 18, 1)
63
FIELD(TB_FLAGS, PM_BASE_ENABLED, 19, 1)
64
-FIELD(TB_FLAGS, VTA, 20, 1)
65
-FIELD(TB_FLAGS, VMA, 21, 1)
66
+FIELD(TB_FLAGS, VTA, 18, 1)
67
+FIELD(TB_FLAGS, VMA, 19, 1)
68
/* Native debug itrigger */
69
-FIELD(TB_FLAGS, ITRIGGER, 22, 1)
70
+FIELD(TB_FLAGS, ITRIGGER, 20, 1)
71
/* Virtual mode enabled */
72
-FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
73
-FIELD(TB_FLAGS, PRIV, 24, 2)
74
-FIELD(TB_FLAGS, AXL, 26, 2)
75
+FIELD(TB_FLAGS, VIRT_ENABLED, 21, 1)
76
+FIELD(TB_FLAGS, PRIV, 22, 2)
77
+FIELD(TB_FLAGS, AXL, 24, 2)
78
/* zicfilp needs a TB flag to track indirect branches */
79
-FIELD(TB_FLAGS, FCFI_ENABLED, 28, 1)
80
-FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 29, 1)
81
+FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
82
+FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
83
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
84
-FIELD(TB_FLAGS, BCFI_ENABLED, 30, 1)
85
+FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
86
87
#ifdef TARGET_RISCV32
88
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
89
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_get_vlmax(uint32_t vlenb, uint32_t vsew,
90
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
91
uint64_t *cs_base, uint32_t *pflags);
92
93
-void riscv_cpu_update_mask(CPURISCVState *env);
94
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
95
96
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
97
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/riscv/cpu_bits.h
100
+++ b/target/riscv/cpu_bits.h
101
@@ -XXX,XX +XXX,XX @@
102
#define CSR_MHPMCOUNTER30H 0xb9e
103
#define CSR_MHPMCOUNTER31H 0xb9f
104
105
-/*
106
- * User PointerMasking registers
107
- * NB: actual CSR numbers might be changed in future
108
- */
109
-#define CSR_UMTE 0x4c0
110
-#define CSR_UPMMASK 0x4c1
111
-#define CSR_UPMBASE 0x4c2
112
-
113
-/*
114
- * Machine PointerMasking registers
115
- * NB: actual CSR numbers might be changed in future
116
- */
117
-#define CSR_MMTE 0x3c0
118
-#define CSR_MPMMASK 0x3c1
119
-#define CSR_MPMBASE 0x3c2
120
-
121
-/*
122
- * Supervisor PointerMaster registers
123
- * NB: actual CSR numbers might be changed in future
124
- */
125
-#define CSR_SMTE 0x1c0
126
-#define CSR_SPMMASK 0x1c1
127
-#define CSR_SPMBASE 0x1c2
128
-
129
-/*
130
- * Hypervisor PointerMaster registers
131
- * NB: actual CSR numbers might be changed in future
132
- */
133
-#define CSR_VSMTE 0x2c0
134
-#define CSR_VSPMMASK 0x2c1
135
-#define CSR_VSPMBASE 0x2c2
136
#define CSR_SCOUNTOVF 0xda0
137
138
/* Crypto Extension */
139
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
140
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
141
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
142
143
-/* General PointerMasking CSR bits */
144
-#define PM_ENABLE 0x00000001ULL
145
-#define PM_CURRENT 0x00000002ULL
146
-#define PM_INSN 0x00000004ULL
147
-
148
/* Execution environment configuration bits */
149
#define MENVCFG_FIOM BIT(0)
150
#define MENVCFG_LPE BIT(2) /* zicfilp */
151
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
152
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
153
#define HENVCFGH_STCE MENVCFGH_STCE
154
155
-/* Offsets for every pair of control bits per each priv level */
156
-#define XS_OFFSET 0ULL
157
-#define U_OFFSET 2ULL
158
-#define S_OFFSET 5ULL
159
-#define M_OFFSET 8ULL
160
-
161
-#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
162
-#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
163
-#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
164
-#define U_PM_INSN (PM_INSN << U_OFFSET)
165
-#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
166
-#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
167
-#define S_PM_INSN (PM_INSN << S_OFFSET)
168
-#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
169
-#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
170
-#define M_PM_INSN (PM_INSN << M_OFFSET)
171
-
172
-/* mmte CSR bits */
173
-#define MMTE_PM_XS_BITS PM_XS_BITS
174
-#define MMTE_U_PM_ENABLE U_PM_ENABLE
175
-#define MMTE_U_PM_CURRENT U_PM_CURRENT
176
-#define MMTE_U_PM_INSN U_PM_INSN
177
-#define MMTE_S_PM_ENABLE S_PM_ENABLE
178
-#define MMTE_S_PM_CURRENT S_PM_CURRENT
179
-#define MMTE_S_PM_INSN S_PM_INSN
180
-#define MMTE_M_PM_ENABLE M_PM_ENABLE
181
-#define MMTE_M_PM_CURRENT M_PM_CURRENT
182
-#define MMTE_M_PM_INSN M_PM_INSN
183
-#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
184
- MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
185
- MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
186
- MMTE_PM_XS_BITS)
187
-
188
-/* (v)smte CSR bits */
189
-#define SMTE_PM_XS_BITS PM_XS_BITS
190
-#define SMTE_U_PM_ENABLE U_PM_ENABLE
191
-#define SMTE_U_PM_CURRENT U_PM_CURRENT
192
-#define SMTE_U_PM_INSN U_PM_INSN
193
-#define SMTE_S_PM_ENABLE S_PM_ENABLE
194
-#define SMTE_S_PM_CURRENT S_PM_CURRENT
195
-#define SMTE_S_PM_INSN S_PM_INSN
196
-#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
197
- SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
198
- SMTE_PM_XS_BITS)
199
-
200
-/* umte CSR bits */
201
-#define UMTE_U_PM_ENABLE U_PM_ENABLE
202
-#define UMTE_U_PM_CURRENT U_PM_CURRENT
203
-#define UMTE_U_PM_INSN U_PM_INSN
204
-#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
205
-
206
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
207
#define ISELECT_IPRIO0 0x30
208
#define ISELECT_IPRIO15 0x3f
209
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
210
index XXXXXXX..XXXXXXX 100644
211
--- a/target/riscv/cpu.c
212
+++ b/target/riscv/cpu.c
213
@@ -XXX,XX +XXX,XX @@
214
/* RISC-V CPU definitions */
215
static const char riscv_single_letter_exts[] = "IEMAFDQCBPVH";
216
const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV,
217
- RVC, RVS, RVU, RVH, RVJ, RVG, RVB, 0};
218
+ RVC, RVS, RVU, RVH, RVG, RVB, 0};
219
220
/*
221
* From vector_helper.c
222
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
223
CSR_MSCRATCH,
224
CSR_SSCRATCH,
225
CSR_SATP,
226
- CSR_MMTE,
227
- CSR_UPMBASE,
228
- CSR_UPMMASK,
229
- CSR_SPMBASE,
230
- CSR_SPMMASK,
231
- CSR_MPMBASE,
232
- CSR_MPMMASK,
233
};
234
235
for (i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
236
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
237
}
238
i++;
239
}
240
- /* mmte is supposed to have pm.current hardwired to 1 */
241
- env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
242
243
/*
244
* Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor
245
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
246
env->ssp = 0;
247
248
env->xl = riscv_cpu_mxl(env);
249
- riscv_cpu_update_mask(env);
250
cs->exception_index = RISCV_EXCP_NONE;
251
env->load_res = -1;
252
set_default_nan_mode(1, &env->fp_status);
253
@@ -XXX,XX +XXX,XX @@ static const MISAExtInfo misa_ext_info_arr[] = {
254
MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
255
MISA_EXT_INFO(RVU, "u", "User-level instructions"),
256
MISA_EXT_INFO(RVH, "h", "Hypervisor"),
257
- MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
258
MISA_EXT_INFO(RVV, "v", "Vector operations"),
259
MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
260
MISA_EXT_INFO(RVB, "b", "Bit manipulation (Zba_Zbb_Zbs)")
261
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
262
index XXXXXXX..XXXXXXX 100644
263
--- a/target/riscv/cpu_helper.c
264
+++ b/target/riscv/cpu_helper.c
265
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
266
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
267
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
268
flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
269
- if (env->cur_pmmask != 0) {
270
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
271
- }
272
- if (env->cur_pmbase != 0) {
273
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
274
- }
275
276
*pflags = flags;
277
}
278
279
-void riscv_cpu_update_mask(CPURISCVState *env)
280
-{
281
- target_ulong mask = 0, base = 0;
282
- RISCVMXL xl = env->xl;
283
- /*
284
- * TODO: Current RVJ spec does not specify
285
- * how the extension interacts with XLEN.
286
- */
287
-#ifndef CONFIG_USER_ONLY
288
- int mode = cpu_address_mode(env);
289
- xl = cpu_get_xl(env, mode);
290
- if (riscv_has_ext(env, RVJ)) {
291
- switch (mode) {
292
- case PRV_M:
293
- if (env->mmte & M_PM_ENABLE) {
294
- mask = env->mpmmask;
295
- base = env->mpmbase;
296
- }
297
- break;
298
- case PRV_S:
299
- if (env->mmte & S_PM_ENABLE) {
300
- mask = env->spmmask;
301
- base = env->spmbase;
302
- }
303
- break;
304
- case PRV_U:
305
- if (env->mmte & U_PM_ENABLE) {
306
- mask = env->upmmask;
307
- base = env->upmbase;
308
- }
309
- break;
310
- default:
311
- g_assert_not_reached();
312
- }
313
- }
314
-#endif
315
- if (xl == MXL_RV32) {
316
- env->cur_pmmask = mask & UINT32_MAX;
317
- env->cur_pmbase = base & UINT32_MAX;
318
- } else {
319
- env->cur_pmmask = mask;
320
- env->cur_pmbase = base;
321
- }
322
-}
323
-
324
#ifndef CONFIG_USER_ONLY
325
326
/*
327
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
328
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
329
env->priv = newpriv;
330
env->xl = cpu_recompute_xl(env);
331
- riscv_cpu_update_mask(env);
332
333
/*
334
* Clear the load reservation - otherwise a reservation placed in one
335
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
336
index XXXXXXX..XXXXXXX 100644
337
--- a/target/riscv/csr.c
338
+++ b/target/riscv/csr.c
339
@@ -XXX,XX +XXX,XX @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
340
return hmode(env, csrno);
341
}
342
343
-/* Checks if PointerMasking registers could be accessed */
344
-static RISCVException pointer_masking(CPURISCVState *env, int csrno)
345
-{
346
- /* Check if j-ext is present */
347
- if (riscv_has_ext(env, RVJ)) {
348
- return RISCV_EXCP_NONE;
349
- }
350
- return RISCV_EXCP_ILLEGAL_INST;
351
-}
352
-
353
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
354
{
355
if (!riscv_cpu_cfg(env)->ext_ssaia) {
356
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
357
env->xl = cpu_recompute_xl(env);
358
}
359
360
- riscv_cpu_update_mask(env);
361
return RISCV_EXCP_NONE;
362
}
363
364
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno,
365
return RISCV_EXCP_NONE;
366
}
367
368
-/*
369
- * Functions to access Pointer Masking feature registers
370
- * We have to check if current priv lvl could modify
371
- * csr in given mode
372
- */
373
-static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
374
-{
375
- int csr_priv = get_field(csrno, 0x300);
376
- int pm_current;
377
-
378
- if (env->debugger) {
379
- return false;
380
- }
381
- /*
382
- * If priv lvls differ that means we're accessing csr from higher priv lvl,
383
- * so allow the access
384
- */
385
- if (env->priv != csr_priv) {
386
- return false;
387
- }
388
- switch (env->priv) {
389
- case PRV_M:
390
- pm_current = get_field(env->mmte, M_PM_CURRENT);
391
- break;
392
- case PRV_S:
393
- pm_current = get_field(env->mmte, S_PM_CURRENT);
394
- break;
395
- case PRV_U:
396
- pm_current = get_field(env->mmte, U_PM_CURRENT);
397
- break;
398
- default:
399
- g_assert_not_reached();
400
- }
401
- /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
402
- return !pm_current;
403
-}
404
-
405
-static RISCVException read_mmte(CPURISCVState *env, int csrno,
406
- target_ulong *val)
407
-{
408
- *val = env->mmte & MMTE_MASK;
409
- return RISCV_EXCP_NONE;
410
-}
411
-
412
-static RISCVException write_mmte(CPURISCVState *env, int csrno,
413
- target_ulong val)
414
-{
415
- uint64_t mstatus;
416
- target_ulong wpri_val = val & MMTE_MASK;
417
-
418
- if (val != wpri_val) {
419
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
420
- TARGET_FMT_lx "\n", "MMTE: WPRI violation written 0x",
421
- val, "vs expected 0x", wpri_val);
422
- }
423
- /* for machine mode pm.current is hardwired to 1 */
424
- wpri_val |= MMTE_M_PM_CURRENT;
425
-
426
- /* hardwiring pm.instruction bit to 0, since it's not supported yet */
427
- wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
428
- env->mmte = wpri_val | EXT_STATUS_DIRTY;
429
- riscv_cpu_update_mask(env);
430
-
431
- /* Set XS and SD bits, since PM CSRs are dirty */
432
- mstatus = env->mstatus | MSTATUS_XS;
433
- write_mstatus(env, csrno, mstatus);
434
- return RISCV_EXCP_NONE;
435
-}
436
-
437
-static RISCVException read_smte(CPURISCVState *env, int csrno,
438
- target_ulong *val)
439
-{
440
- *val = env->mmte & SMTE_MASK;
441
- return RISCV_EXCP_NONE;
442
-}
443
-
444
-static RISCVException write_smte(CPURISCVState *env, int csrno,
445
- target_ulong val)
446
-{
447
- target_ulong wpri_val = val & SMTE_MASK;
448
-
449
- if (val != wpri_val) {
450
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
451
- TARGET_FMT_lx "\n", "SMTE: WPRI violation written 0x",
452
- val, "vs expected 0x", wpri_val);
453
- }
454
-
455
- /* if pm.current==0 we can't modify current PM CSRs */
456
- if (check_pm_current_disabled(env, csrno)) {
457
- return RISCV_EXCP_NONE;
458
- }
459
-
460
- wpri_val |= (env->mmte & ~SMTE_MASK);
461
- write_mmte(env, csrno, wpri_val);
462
- return RISCV_EXCP_NONE;
463
-}
464
-
465
-static RISCVException read_umte(CPURISCVState *env, int csrno,
466
- target_ulong *val)
467
-{
468
- *val = env->mmte & UMTE_MASK;
469
- return RISCV_EXCP_NONE;
470
-}
471
-
472
-static RISCVException write_umte(CPURISCVState *env, int csrno,
473
- target_ulong val)
474
-{
475
- target_ulong wpri_val = val & UMTE_MASK;
476
-
477
- if (val != wpri_val) {
478
- qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s"
479
- TARGET_FMT_lx "\n", "UMTE: WPRI violation written 0x",
480
- val, "vs expected 0x", wpri_val);
481
- }
482
-
483
- if (check_pm_current_disabled(env, csrno)) {
484
- return RISCV_EXCP_NONE;
485
- }
486
-
487
- wpri_val |= (env->mmte & ~UMTE_MASK);
488
- write_mmte(env, csrno, wpri_val);
489
- return RISCV_EXCP_NONE;
490
-}
491
-
492
-static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
493
- target_ulong *val)
494
-{
495
- *val = env->mpmmask;
496
- return RISCV_EXCP_NONE;
497
-}
498
-
499
-static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
500
- target_ulong val)
501
-{
502
- uint64_t mstatus;
503
-
504
- env->mpmmask = val;
505
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
506
- env->cur_pmmask = val;
507
- }
508
- env->mmte |= EXT_STATUS_DIRTY;
509
-
510
- /* Set XS and SD bits, since PM CSRs are dirty */
511
- mstatus = env->mstatus | MSTATUS_XS;
512
- write_mstatus(env, csrno, mstatus);
513
- return RISCV_EXCP_NONE;
514
-}
515
-
516
-static RISCVException read_spmmask(CPURISCVState *env, int csrno,
517
- target_ulong *val)
518
-{
519
- *val = env->spmmask;
520
- return RISCV_EXCP_NONE;
521
-}
522
-
523
-static RISCVException write_spmmask(CPURISCVState *env, int csrno,
524
- target_ulong val)
525
-{
526
- uint64_t mstatus;
527
-
528
- /* if pm.current==0 we can't modify current PM CSRs */
529
- if (check_pm_current_disabled(env, csrno)) {
530
- return RISCV_EXCP_NONE;
531
- }
532
- env->spmmask = val;
533
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
534
- env->cur_pmmask = val;
535
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
536
- env->cur_pmmask &= UINT32_MAX;
537
- }
538
- }
539
- env->mmte |= EXT_STATUS_DIRTY;
540
-
541
- /* Set XS and SD bits, since PM CSRs are dirty */
542
- mstatus = env->mstatus | MSTATUS_XS;
543
- write_mstatus(env, csrno, mstatus);
544
- return RISCV_EXCP_NONE;
545
-}
546
-
547
-static RISCVException read_upmmask(CPURISCVState *env, int csrno,
548
- target_ulong *val)
549
-{
550
- *val = env->upmmask;
551
- return RISCV_EXCP_NONE;
552
-}
553
-
554
-static RISCVException write_upmmask(CPURISCVState *env, int csrno,
555
- target_ulong val)
556
-{
557
- uint64_t mstatus;
558
-
559
- /* if pm.current==0 we can't modify current PM CSRs */
560
- if (check_pm_current_disabled(env, csrno)) {
561
- return RISCV_EXCP_NONE;
562
- }
563
- env->upmmask = val;
564
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
565
- env->cur_pmmask = val;
566
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
567
- env->cur_pmmask &= UINT32_MAX;
568
- }
569
- }
570
- env->mmte |= EXT_STATUS_DIRTY;
571
-
572
- /* Set XS and SD bits, since PM CSRs are dirty */
573
- mstatus = env->mstatus | MSTATUS_XS;
574
- write_mstatus(env, csrno, mstatus);
575
- return RISCV_EXCP_NONE;
576
-}
577
-
578
-static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
579
- target_ulong *val)
580
-{
581
- *val = env->mpmbase;
582
- return RISCV_EXCP_NONE;
583
-}
584
-
585
-static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
586
- target_ulong val)
587
-{
588
- uint64_t mstatus;
589
-
590
- env->mpmbase = val;
591
- if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
592
- env->cur_pmbase = val;
593
- }
594
- env->mmte |= EXT_STATUS_DIRTY;
595
-
596
- /* Set XS and SD bits, since PM CSRs are dirty */
597
- mstatus = env->mstatus | MSTATUS_XS;
598
- write_mstatus(env, csrno, mstatus);
599
- return RISCV_EXCP_NONE;
600
-}
601
-
602
-static RISCVException read_spmbase(CPURISCVState *env, int csrno,
603
- target_ulong *val)
604
-{
605
- *val = env->spmbase;
606
- return RISCV_EXCP_NONE;
607
-}
608
-
609
-static RISCVException write_spmbase(CPURISCVState *env, int csrno,
610
- target_ulong val)
611
-{
612
- uint64_t mstatus;
613
-
614
- /* if pm.current==0 we can't modify current PM CSRs */
615
- if (check_pm_current_disabled(env, csrno)) {
616
- return RISCV_EXCP_NONE;
617
- }
618
- env->spmbase = val;
619
- if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
620
- env->cur_pmbase = val;
621
- if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
622
- env->cur_pmbase &= UINT32_MAX;
623
- }
624
- }
625
- env->mmte |= EXT_STATUS_DIRTY;
626
-
627
- /* Set XS and SD bits, since PM CSRs are dirty */
628
- mstatus = env->mstatus | MSTATUS_XS;
629
- write_mstatus(env, csrno, mstatus);
630
- return RISCV_EXCP_NONE;
631
-}
632
-
633
-static RISCVException read_upmbase(CPURISCVState *env, int csrno,
634
- target_ulong *val)
635
-{
636
- *val = env->upmbase;
637
- return RISCV_EXCP_NONE;
638
-}
639
-
640
-static RISCVException write_upmbase(CPURISCVState *env, int csrno,
641
- target_ulong val)
642
-{
643
- uint64_t mstatus;
644
-
645
- /* if pm.current==0 we can't modify current PM CSRs */
646
- if (check_pm_current_disabled(env, csrno)) {
647
- return RISCV_EXCP_NONE;
648
- }
649
- env->upmbase = val;
650
- if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
651
- env->cur_pmbase = val;
652
- if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
653
- env->cur_pmbase &= UINT32_MAX;
654
- }
655
- }
656
- env->mmte |= EXT_STATUS_DIRTY;
657
-
658
- /* Set XS and SD bits, since PM CSRs are dirty */
659
- mstatus = env->mstatus | MSTATUS_XS;
660
- write_mstatus(env, csrno, mstatus);
661
- return RISCV_EXCP_NONE;
662
-}
663
-
664
#endif
665
666
/* Crypto Extension */
667
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
668
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
669
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
670
671
- /* User Pointer Masking */
672
- [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
673
- [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
674
- write_upmmask },
675
- [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
676
- write_upmbase },
677
- /* Machine Pointer Masking */
678
- [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
679
- [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
680
- write_mpmmask },
681
- [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
682
- write_mpmbase },
683
- /* Supervisor Pointer Masking */
684
- [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
685
- [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
686
- write_spmmask },
687
- [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
688
- write_spmbase },
689
-
690
/* Performance Counters */
691
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
692
[CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
693
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
694
index XXXXXXX..XXXXXXX 100644
695
--- a/target/riscv/machine.c
696
+++ b/target/riscv/machine.c
697
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_vector = {
698
699
static bool pointermasking_needed(void *opaque)
700
{
701
- RISCVCPU *cpu = opaque;
702
- CPURISCVState *env = &cpu->env;
703
-
704
- return riscv_has_ext(env, RVJ);
705
+ return false;
706
}
707
708
static const VMStateDescription vmstate_pointermasking = {
709
.name = "cpu/pointer_masking",
710
- .version_id = 1,
711
- .minimum_version_id = 1,
712
+ .version_id = 2,
713
+ .minimum_version_id = 2,
714
.needed = pointermasking_needed,
715
.fields = (const VMStateField[]) {
716
- VMSTATE_UINTTL(env.mmte, RISCVCPU),
717
- VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
718
- VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
719
- VMSTATE_UINTTL(env.spmmask, RISCVCPU),
720
- VMSTATE_UINTTL(env.spmbase, RISCVCPU),
721
- VMSTATE_UINTTL(env.upmmask, RISCVCPU),
722
- VMSTATE_UINTTL(env.upmbase, RISCVCPU),
723
724
VMSTATE_END_OF_LIST()
725
}
726
@@ -XXX,XX +XXX,XX @@ static int riscv_cpu_post_load(void *opaque, int version_id)
727
CPURISCVState *env = &cpu->env;
728
729
env->xl = cpu_recompute_xl(env);
730
- riscv_cpu_update_mask(env);
731
return 0;
732
}
733
734
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
735
index XXXXXXX..XXXXXXX 100644
736
--- a/target/riscv/tcg/tcg-cpu.c
737
+++ b/target/riscv/tcg/tcg-cpu.c
738
@@ -XXX,XX +XXX,XX @@ static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
739
MISA_CFG(RVS, true),
740
MISA_CFG(RVU, true),
741
MISA_CFG(RVH, true),
742
- MISA_CFG(RVJ, false),
743
MISA_CFG(RVV, false),
744
MISA_CFG(RVG, false),
745
MISA_CFG(RVB, false),
746
@@ -XXX,XX +XXX,XX @@ static void riscv_init_max_cpu_extensions(Object *obj)
747
CPURISCVState *env = &cpu->env;
748
const RISCVCPUMultiExtConfig *prop;
749
750
- /* Enable RVG, RVJ and RVV that are disabled by default */
751
- riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVJ | RVV);
752
+ /* Enable RVG and RVV that are disabled by default */
753
+ riscv_cpu_set_misa_ext(env, env->misa_ext | RVB | RVG | RVV);
754
755
for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
756
isa_ext_update_enabled(cpu, prop->offset, true);
757
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
758
index XXXXXXX..XXXXXXX 100644
759
--- a/target/riscv/translate.c
760
+++ b/target/riscv/translate.c
761
@@ -XXX,XX +XXX,XX @@ static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
762
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
763
static TCGv load_res;
764
static TCGv load_val;
765
-/* globals for PM CSRs */
766
-static TCGv pm_mask;
767
-static TCGv pm_base;
768
769
/*
770
* If an operation is being performed on less than TARGET_LONG_BITS,
771
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
772
bool vl_eq_vlmax;
773
CPUState *cs;
774
TCGv zero;
775
- /* PointerMasking extension */
776
- bool pm_mask_enabled;
777
- bool pm_base_enabled;
778
/* Ztso */
779
bool ztso;
780
/* Use icount trigger for native debug */
781
@@ -XXX,XX +XXX,XX @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
782
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
783
784
tcg_gen_addi_tl(addr, src1, imm);
785
- if (ctx->pm_mask_enabled) {
786
- tcg_gen_andc_tl(addr, addr, pm_mask);
787
- } else if (get_address_xl(ctx) == MXL_RV32) {
788
+ if (get_address_xl(ctx) == MXL_RV32) {
789
tcg_gen_ext32u_tl(addr, addr);
790
}
791
- if (ctx->pm_base_enabled) {
792
- tcg_gen_or_tl(addr, addr, pm_base);
793
- }
794
795
return addr;
796
}
797
@@ -XXX,XX +XXX,XX @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
798
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
799
800
tcg_gen_add_tl(addr, src1, offs);
801
- if (ctx->pm_mask_enabled) {
802
- tcg_gen_andc_tl(addr, addr, pm_mask);
803
- } else if (get_xl(ctx) == MXL_RV32) {
804
+ if (get_xl(ctx) == MXL_RV32) {
805
tcg_gen_ext32u_tl(addr, addr);
806
}
807
- if (ctx->pm_base_enabled) {
808
- tcg_gen_or_tl(addr, addr, pm_base);
809
- }
810
+
811
return addr;
812
}
813
814
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
815
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
816
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
817
ctx->cs = cs;
818
- ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
819
- ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
820
ctx->ztso = cpu->cfg.ext_ztso;
821
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
822
ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
823
@@ -XXX,XX +XXX,XX @@ void riscv_translate_init(void)
824
"load_res");
825
load_val = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, load_val),
826
"load_val");
827
- /* Assign PM CSRs to tcg globals */
828
- pm_mask = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmmask),
829
- "pmmask");
830
- pm_base = tcg_global_mem_new(tcg_env, offsetof(CPURISCVState, cur_pmbase),
831
- "pmbase");
832
}
833
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
25
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
834
index XXXXXXX..XXXXXXX 100644
26
index XXXXXXX..XXXXXXX 100644
835
--- a/target/riscv/vector_helper.c
27
--- a/target/riscv/vector_helper.c
836
+++ b/target/riscv/vector_helper.c
28
+++ b/target/riscv/vector_helper.c
837
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
29
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
838
30
uint32_t esz = 1 << log2_esz;
839
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
31
uint32_t msize = nf * esz;
840
{
32
uint32_t vma = vext_vma(desc);
841
- return (addr & ~env->cur_pmmask) | env->cur_pmbase;
33
- target_ulong addr, offset, remain, page_split, elems;
842
+ return addr;
34
+ target_ulong addr, addr_probe, addr_i, offset, remain, page_split, elems;
843
}
35
int mmu_index = riscv_env_mmu_index(env, false);
844
36
+ int flags;
845
/*
37
+ void *host;
38
39
VSTART_CHECK_EARLY_EXIT(env);
40
41
- /* probe every access */
42
- for (i = env->vstart; i < env->vl; i++) {
43
- if (!vm && !vext_elem_mask(v0, i)) {
44
- continue;
45
- }
46
- addr = adjust_addr(env, base + i * (nf << log2_esz));
47
- if (i == 0) {
48
- /* Allow fault on first element. */
49
- probe_pages(env, addr, nf << log2_esz, ra, MMU_DATA_LOAD);
50
- } else {
51
- remain = nf << log2_esz;
52
- while (remain > 0) {
53
- void *host;
54
- int flags;
55
-
56
- offset = -(addr | TARGET_PAGE_MASK);
57
-
58
- /* Probe nonfault on subsequent elements. */
59
- flags = probe_access_flags(env, addr, offset, MMU_DATA_LOAD,
60
- mmu_index, true, &host, 0);
61
-
62
- /*
63
- * Stop if invalid (unmapped) or mmio (transaction may fail).
64
- * Do not stop if watchpoint, as the spec says that
65
- * first-fault should continue to access the same
66
- * elements regardless of any watchpoint.
67
- */
68
- if (flags & ~TLB_WATCHPOINT) {
69
- vl = i;
70
- goto ProbeSuccess;
71
- }
72
- if (remain <= offset) {
73
- break;
74
+ addr = base + ((env->vstart * nf) << log2_esz);
75
+ page_split = -(addr | TARGET_PAGE_MASK);
76
+ /* Get number of elements */
77
+ elems = page_split / msize;
78
+ if (unlikely(env->vstart + elems >= env->vl)) {
79
+ elems = env->vl - env->vstart;
80
+ }
81
+
82
+ /* Check page permission/pmp/watchpoint/etc. */
83
+ flags = probe_access_flags(env, adjust_addr(env, addr), elems * msize,
84
+ MMU_DATA_LOAD, mmu_index, true, &host, ra);
85
+
86
+ /* If we are crossing a page check also the second page. */
87
+ if (env->vl > elems) {
88
+ addr_probe = addr + (elems << log2_esz);
89
+ flags |= probe_access_flags(env, adjust_addr(env, addr_probe),
90
+ elems * msize, MMU_DATA_LOAD, mmu_index,
91
+ true, &host, ra);
92
+ }
93
+
94
+ if (flags & ~TLB_WATCHPOINT) {
95
+ /* probe every access */
96
+ for (i = env->vstart; i < env->vl; i++) {
97
+ if (!vm && !vext_elem_mask(v0, i)) {
98
+ continue;
99
+ }
100
+ addr_i = adjust_addr(env, base + i * (nf << log2_esz));
101
+ if (i == 0) {
102
+ /* Allow fault on first element. */
103
+ probe_pages(env, addr_i, nf << log2_esz, ra, MMU_DATA_LOAD);
104
+ } else {
105
+ remain = nf << log2_esz;
106
+ while (remain > 0) {
107
+ offset = -(addr_i | TARGET_PAGE_MASK);
108
+
109
+ /* Probe nonfault on subsequent elements. */
110
+ flags = probe_access_flags(env, addr_i, offset,
111
+ MMU_DATA_LOAD, mmu_index, true,
112
+ &host, 0);
113
+
114
+ /*
115
+ * Stop if invalid (unmapped) or mmio (transaction may
116
+ * fail). Do not stop if watchpoint, as the spec says that
117
+ * first-fault should continue to access the same
118
+ * elements regardless of any watchpoint.
119
+ */
120
+ if (flags & ~TLB_WATCHPOINT) {
121
+ vl = i;
122
+ goto ProbeSuccess;
123
+ }
124
+ if (remain <= offset) {
125
+ break;
126
+ }
127
+ remain -= offset;
128
+ addr_i = adjust_addr(env, addr_i + offset);
129
}
130
- remain -= offset;
131
- addr = adjust_addr(env, addr + offset);
132
}
133
}
134
}
135
@@ -XXX,XX +XXX,XX @@ ProbeSuccess:
136
137
if (env->vstart < env->vl) {
138
if (vm) {
139
- /* Calculate the page range of first page */
140
- addr = base + ((env->vstart * nf) << log2_esz);
141
- page_split = -(addr | TARGET_PAGE_MASK);
142
- /* Get number of elements */
143
- elems = page_split / msize;
144
- if (unlikely(env->vstart + elems >= env->vl)) {
145
- elems = env->vl - env->vstart;
146
- }
147
-
148
/* Load/store elements in the first page */
149
if (likely(elems)) {
150
vext_page_ldst_us(env, vd, addr, elems, nf, max_elems,
846
--
151
--
847
2.47.1
152
2.48.1
diff view generated by jsdifflib
1
From: Atish Patra <atishp@rivosinc.com>
1
From: Deepak Gupta <debug@rivosinc.com>
2
2
3
As per the ratified AIA spec v1.0, three stateen bits control AIA CSR
3
Commit:8205bc1 ("target/riscv: introduce ssp and enabling controls for
4
access.
4
zicfiss") introduced CSR_SSP but it mis-interpreted the spec on access
5
to CSR_SSP in M-mode. Gated to CSR_SSP is not gated via `xSSE`. But
6
rather rules clearly specified in section "22.2.1. Shadow Stack Pointer
7
(ssp) CSR access contr" in the priv spec.
5
8
6
Bit 60 controls the indirect CSRs
9
Fixes: 8205bc127a83 ("target/riscv: introduce ssp and enabling controls
7
Bit 59 controls the most AIA CSR state
10
for zicfiss". Thanks to Adam Zabrocki for bringing this to attention.
8
Bit 58 controls the IMSIC state such as stopei and vstopei
9
11
10
Enable the corresponding bits in [m|h]stateen and enable corresponding
12
Reported-by: Adam Zabrocki <azabrocki@nvidia.com>
11
checks in the CSR accessor functions.
13
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
12
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20250306064636.452396-1-debug@rivosinc.com>
15
Message-ID: <20250110-counter_delegation-v5-3-e83d797ae294@rivosinc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
17
---
18
target/riscv/csr.c | 85 +++++++++++++++++++++++++++++++++++++++++++++-
18
target/riscv/csr.c | 5 +++++
19
1 file changed, 84 insertions(+), 1 deletion(-)
19
1 file changed, 5 insertions(+)
20
20
21
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
21
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
22
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/csr.c
23
--- a/target/riscv/csr.c
24
+++ b/target/riscv/csr.c
24
+++ b/target/riscv/csr.c
25
@@ -XXX,XX +XXX,XX @@ static RISCVException smode32(CPURISCVState *env, int csrno)
25
@@ -XXX,XX +XXX,XX @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno)
26
27
static RISCVException aia_smode(CPURISCVState *env, int csrno)
28
{
29
+ int ret;
30
+
31
if (!riscv_cpu_cfg(env)->ext_ssaia) {
32
return RISCV_EXCP_ILLEGAL_INST;
26
return RISCV_EXCP_ILLEGAL_INST;
33
}
27
}
34
28
35
+ if (csrno == CSR_STOPEI) {
29
+ /* If ext implemented, M-mode always have access to SSP CSR */
36
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
30
+ if (env->priv == PRV_M) {
37
+ } else {
31
+ return RISCV_EXCP_NONE;
38
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
39
+ }
32
+ }
40
+
33
+
41
+ if (ret != RISCV_EXCP_NONE) {
34
/* if bcfi not active for current env, access to csr is illegal */
42
+ return ret;
35
if (!cpu_get_bcfien(env)) {
43
+ }
36
#if !defined(CONFIG_USER_ONLY)
44
+
45
return smode(env, csrno);
46
}
47
48
static RISCVException aia_smode32(CPURISCVState *env, int csrno)
49
{
50
+ int ret;
51
+
52
if (!riscv_cpu_cfg(env)->ext_ssaia) {
53
return RISCV_EXCP_ILLEGAL_INST;
54
}
55
56
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
57
+ if (ret != RISCV_EXCP_NONE) {
58
+ return ret;
59
+ }
60
+
61
+ if (ret != RISCV_EXCP_NONE) {
62
+ return ret;
63
+ }
64
+
65
return smode32(env, csrno);
66
}
67
68
@@ -XXX,XX +XXX,XX @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
69
70
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
71
{
72
+ int ret;
73
+
74
if (!riscv_cpu_cfg(env)->ext_ssaia) {
75
return RISCV_EXCP_ILLEGAL_INST;
76
}
77
78
- return hmode(env, csrno);
79
+ if (csrno == CSR_VSTOPEI) {
80
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_IMSIC);
81
+ } else {
82
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
83
+ }
84
+
85
+ if (ret != RISCV_EXCP_NONE) {
86
+ return ret;
87
+ }
88
+
89
+ return hmode(env, csrno);
90
}
91
92
static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
93
{
94
+ int ret;
95
+
96
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
97
+ return RISCV_EXCP_ILLEGAL_INST;
98
+ }
99
+
100
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
101
+ if (ret != RISCV_EXCP_NONE) {
102
+ return ret;
103
+ }
104
+
105
if (!riscv_cpu_cfg(env)->ext_ssaia) {
106
return RISCV_EXCP_ILLEGAL_INST;
107
}
108
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
109
target_ulong wr_mask)
110
{
111
target_ulong *iselect;
112
+ int ret;
113
+
114
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
115
+ if (ret != RISCV_EXCP_NONE) {
116
+ return ret;
117
+ }
118
119
/* Translate CSR number for VS-mode */
120
csrno = csrind_xlate_vs_csrno(env, csrno);
121
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
122
int ret = -EINVAL;
123
target_ulong isel;
124
125
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
126
+ if (ret != RISCV_EXCP_NONE) {
127
+ return ret;
128
+ }
129
+
130
/* Translate CSR number for VS-mode */
131
csrno = csrind_xlate_vs_csrno(env, csrno);
132
133
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
134
wr_mask |= SMSTATEEN0_P1P13;
135
}
136
137
+ if (riscv_cpu_cfg(env)->ext_smaia) {
138
+ wr_mask |= SMSTATEEN0_SVSLCT;
139
+ }
140
+
141
+ /*
142
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
143
+ * implemented. However, that information is with MachineState and we can't
144
+ * figure that out in csr.c. Just enable if Smaia is available.
145
+ */
146
+ if (riscv_cpu_cfg(env)->ext_smaia) {
147
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
148
+ }
149
+
150
return write_mstateen(env, csrno, wr_mask, new_val);
151
}
152
153
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
154
wr_mask |= SMSTATEEN0_FCSR;
155
}
156
157
+ if (riscv_cpu_cfg(env)->ext_ssaia) {
158
+ wr_mask |= SMSTATEEN0_SVSLCT;
159
+ }
160
+
161
+ /*
162
+ * As per the AIA specification, SMSTATEEN0_IMSIC is valid only if IMSIC is
163
+ * implemented. However, that information is with MachineState and we can't
164
+ * figure that out in csr.c. Just enable if Ssaia is available.
165
+ */
166
+ if (riscv_cpu_cfg(env)->ext_ssaia) {
167
+ wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
168
+ }
169
+
170
return write_hstateen(env, csrno, wr_mask, new_val);
171
}
172
173
--
37
--
174
2.47.1
38
2.48.1
diff view generated by jsdifflib
1
From: Clément Léger <cleger@rivosinc.com>
1
From: Deepak Gupta <debug@rivosinc.com>
2
2
3
When the Ssdbltrp extension is enabled, SSTATUS.MDT field is cleared
3
Commit f06bfe3dc38c ("target/riscv: implement zicfiss instructions") adds
4
when executing sret if executed in M-mode. When executing mret/mnret,
4
`ssamoswap` instruction. `ssamoswap` takes the code-point from existing
5
SSTATUS.MDT is cleared.
5
reserved encoding (and not a zimop like other shadow stack instructions).
6
If shadow stack is not enabled (via xenvcfg.SSE) and effective priv is
7
less than M then `ssamoswap` must result in an illegal instruction
8
exception. However if effective priv is M, then `ssamoswap` results in
9
store/AMO access fault. See Section "22.2.3. Shadow Stack Memory
10
Protection" of priv spec.
6
11
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
12
Fixes: f06bfe3dc38c ("target/riscv: implement zicfiss instructions")
13
14
Reported-by: Ved Shanbhogue <ved@rivosinc.com>
15
Signed-off-by: Deepak Gupta <debug@rivosinc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
16
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250110125441.3208676-8-cleger@rivosinc.com>
17
Message-ID: <20250306064636.452396-2-debug@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
19
---
12
target/riscv/op_helper.c | 12 ++++++++++++
20
target/riscv/insn_trans/trans_rvzicfiss.c.inc | 17 +++++++++++++++++
13
1 file changed, 12 insertions(+)
21
1 file changed, 17 insertions(+)
14
22
15
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
23
diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/op_helper.c
25
--- a/target/riscv/insn_trans/trans_rvzicfiss.c.inc
18
+++ b/target/riscv/op_helper.c
26
+++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc
19
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
27
@@ -XXX,XX +XXX,XX @@
20
}
28
* You should have received a copy of the GNU General Public License along with
21
mstatus = set_field(mstatus, MSTATUS_SDT, 0);
29
* this program. If not, see <http://www.gnu.org/licenses/>.
22
}
30
*/
23
+ if (riscv_cpu_cfg(env)->ext_smdbltrp && env->priv >= PRV_M) {
31
+
24
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
32
+#define REQUIRE_ZICFISS(ctx) do { \
25
+ }
33
+ if (!ctx->cfg_ptr->ext_zicfiss) { \
26
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
34
+ return false; \
27
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
35
+ } \
28
}
36
+} while (0)
29
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mret(CPURISCVState *env)
37
+
30
if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
38
static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a)
31
mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
39
{
32
}
40
if (!ctx->bcfi_enabled) {
33
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
41
@@ -XXX,XX +XXX,XX @@ static bool trans_ssrdp(DisasContext *ctx, arg_ssrdp *a)
34
+ mstatus = set_field(mstatus, MSTATUS_MDT, 0);
42
static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a)
35
+ }
43
{
36
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
44
REQUIRE_A_OR_ZAAMO(ctx);
37
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
45
+ REQUIRE_ZICFISS(ctx);
38
}
46
+ if (ctx->priv == PRV_M) {
39
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mnret(CPURISCVState *env)
47
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
40
env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
41
}
42
43
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
44
+ if (prev_priv < PRV_M) {
45
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 0);
46
+ }
47
+ }
48
+ }
48
+
49
+
49
if (riscv_has_ext(env, RVH) && prev_virt) {
50
if (!ctx->bcfi_enabled) {
50
riscv_cpu_swap_hypervisor_regs(env);
51
return false;
52
}
53
@@ -XXX,XX +XXX,XX @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a)
54
{
55
REQUIRE_64BIT(ctx);
56
REQUIRE_A_OR_ZAAMO(ctx);
57
+ REQUIRE_ZICFISS(ctx);
58
+ if (ctx->priv == PRV_M) {
59
+ generate_exception(ctx, RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
60
+ }
61
+
62
if (!ctx->bcfi_enabled) {
63
return false;
51
}
64
}
52
--
65
--
53
2.47.1
66
2.48.1
54
55
diff view generated by jsdifflib
1
From: Jason Chien <jason.chien@sifive.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
This commit introduces a translation tag to avoid invalidating an entry
3
The PPN field in a non-leaf PDT entry is positioned differently from that
4
that should not be invalidated when IOMMU executes invalidation commands.
4
in a leaf PDT entry. The original implementation incorrectly used the leaf
5
E.g. IOTINVAL.VMA with GV=0, AV=0, PSCV=1 invalidates both a mapping
5
entry's PPN mask to extract the PPN from a non-leaf entry, leading to an
6
of single stage translation and a mapping of nested translation with
6
erroneous page table walk.
7
the same PSCID, but only the former one should be invalidated.
7
8
This commit introduces new macros to properly define the fields for
9
non-leaf PDT entries and corrects the page table walk.
8
10
9
Signed-off-by: Jason Chien <jason.chien@sifive.com>
11
Signed-off-by: Jason Chien <jason.chien@sifive.com>
10
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Message-ID: <20241108110147.11178-1-jason.chien@sifive.com>
13
Message-ID: <20250301173751.9446-1-jason.chien@sifive.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
15
---
14
hw/riscv/riscv-iommu.c | 205 ++++++++++++++++++++++++++++++-----------
16
hw/riscv/riscv-iommu-bits.h | 6 +++++-
15
1 file changed, 153 insertions(+), 52 deletions(-)
17
hw/riscv/riscv-iommu.c | 4 ++--
18
2 files changed, 7 insertions(+), 3 deletions(-)
16
19
20
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/riscv/riscv-iommu-bits.h
23
+++ b/hw/riscv/riscv-iommu-bits.h
24
@@ -XXX,XX +XXX,XX @@ enum riscv_iommu_fq_causes {
25
#define RISCV_IOMMU_DC_MSIPTP_MODE_OFF 0
26
#define RISCV_IOMMU_DC_MSIPTP_MODE_FLAT 1
27
28
+/* 2.2 Process Directory Table */
29
+#define RISCV_IOMMU_PDTE_VALID BIT_ULL(0)
30
+#define RISCV_IOMMU_PDTE_PPN RISCV_IOMMU_PPN_FIELD
31
+
32
/* Translation attributes fields */
33
#define RISCV_IOMMU_PC_TA_V BIT_ULL(0)
34
#define RISCV_IOMMU_PC_TA_RESERVED GENMASK_ULL(63, 32)
35
36
/* First stage context fields */
37
-#define RISCV_IOMMU_PC_FSC_PPN GENMASK_ULL(43, 0)
38
+#define RISCV_IOMMU_PC_FSC_PPN RISCV_IOMMU_ATP_PPN_FIELD
39
#define RISCV_IOMMU_PC_FSC_RESERVED GENMASK_ULL(59, 44)
40
41
enum riscv_iommu_fq_ttypes {
17
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
42
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
18
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
19
--- a/hw/riscv/riscv-iommu.c
44
--- a/hw/riscv/riscv-iommu.c
20
+++ b/hw/riscv/riscv-iommu.c
45
+++ b/hw/riscv/riscv-iommu.c
21
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUContext {
46
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
22
uint64_t msiptp; /* MSI redirection page table pointer */
47
return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT;
23
};
48
}
24
49
le64_to_cpus(&de);
25
+typedef enum RISCVIOMMUTransTag {
50
- if (!(de & RISCV_IOMMU_PC_TA_V)) {
26
+ RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
51
+ if (!(de & RISCV_IOMMU_PDTE_VALID)) {
27
+ RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
52
return RISCV_IOMMU_FQ_CAUSE_PDT_INVALID;
28
+ RISCV_IOMMU_TRANS_TAG_VG, /* G-stage only */
53
}
29
+ RISCV_IOMMU_TRANS_TAG_VN, /* Nested translation */
54
- addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
30
+} RISCVIOMMUTransTag;
55
+ addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PDTE_PPN));
31
+
32
/* Address translation cache entry */
33
struct RISCVIOMMUEntry {
34
+ RISCVIOMMUTransTag tag; /* Translation Tag */
35
uint64_t iova:44; /* IOVA Page Number */
36
uint64_t pscid:20; /* Process Soft-Context identifier */
37
uint64_t phys:44; /* Physical Page Number */
38
@@ -XXX,XX +XXX,XX @@ static gboolean riscv_iommu_iot_equal(gconstpointer v1, gconstpointer v2)
39
RISCVIOMMUEntry *t1 = (RISCVIOMMUEntry *) v1;
40
RISCVIOMMUEntry *t2 = (RISCVIOMMUEntry *) v2;
41
return t1->gscid == t2->gscid && t1->pscid == t2->pscid &&
42
- t1->iova == t2->iova;
43
+ t1->iova == t2->iova && t1->tag == t2->tag;
44
}
45
46
static guint riscv_iommu_iot_hash(gconstpointer v)
47
@@ -XXX,XX +XXX,XX @@ static guint riscv_iommu_iot_hash(gconstpointer v)
48
return (guint)t->iova;
49
}
50
51
-/* GV: 1 PSCV: 1 AV: 1 */
52
+/* GV: 0 AV: 0 PSCV: 0 GVMA: 0 */
53
+/* GV: 0 AV: 0 GVMA: 1 */
54
+static
55
+void riscv_iommu_iot_inval_all(gpointer key, gpointer value, gpointer data)
56
+{
57
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
58
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
59
+ if (iot->tag == arg->tag) {
60
+ iot->perm = IOMMU_NONE;
61
+ }
62
+}
63
+
64
+/* GV: 0 AV: 0 PSCV: 1 GVMA: 0 */
65
+static
66
+void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value, gpointer data)
67
+{
68
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
69
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
70
+ if (iot->tag == arg->tag &&
71
+ iot->pscid == arg->pscid) {
72
+ iot->perm = IOMMU_NONE;
73
+ }
74
+}
75
+
76
+/* GV: 0 AV: 1 PSCV: 0 GVMA: 0 */
77
+static
78
+void riscv_iommu_iot_inval_iova(gpointer key, gpointer value, gpointer data)
79
+{
80
+ RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
81
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
82
+ if (iot->tag == arg->tag &&
83
+ iot->iova == arg->iova) {
84
+ iot->perm = IOMMU_NONE;
85
+ }
86
+}
87
+
88
+/* GV: 0 AV: 1 PSCV: 1 GVMA: 0 */
89
static void riscv_iommu_iot_inval_pscid_iova(gpointer key, gpointer value,
90
gpointer data)
91
{
92
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
93
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
94
- if (iot->gscid == arg->gscid &&
95
+ if (iot->tag == arg->tag &&
96
iot->pscid == arg->pscid &&
97
iot->iova == arg->iova) {
98
iot->perm = IOMMU_NONE;
99
}
56
}
100
}
57
101
58
riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
102
-/* GV: 1 PSCV: 1 AV: 0 */
103
-static void riscv_iommu_iot_inval_pscid(gpointer key, gpointer value,
104
- gpointer data)
105
+/* GV: 1 AV: 0 PSCV: 0 GVMA: 0 */
106
+/* GV: 1 AV: 0 GVMA: 1 */
107
+static
108
+void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value, gpointer data)
109
{
110
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
111
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
112
- if (iot->gscid == arg->gscid &&
113
- iot->pscid == arg->pscid) {
114
+ if (iot->tag == arg->tag &&
115
+ iot->gscid == arg->gscid) {
116
iot->perm = IOMMU_NONE;
117
}
118
}
119
120
-/* GV: 1 GVMA: 1 */
121
-static void riscv_iommu_iot_inval_gscid_gpa(gpointer key, gpointer value,
122
- gpointer data)
123
+/* GV: 1 AV: 0 PSCV: 1 GVMA: 0 */
124
+static void riscv_iommu_iot_inval_gscid_pscid(gpointer key, gpointer value,
125
+ gpointer data)
126
{
127
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
128
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
129
- if (iot->gscid == arg->gscid) {
130
- /* simplified cache, no GPA matching */
131
+ if (iot->tag == arg->tag &&
132
+ iot->gscid == arg->gscid &&
133
+ iot->pscid == arg->pscid) {
134
iot->perm = IOMMU_NONE;
135
}
136
}
137
138
-/* GV: 1 GVMA: 0 */
139
-static void riscv_iommu_iot_inval_gscid(gpointer key, gpointer value,
140
- gpointer data)
141
+/* GV: 1 AV: 1 PSCV: 0 GVMA: 0 */
142
+/* GV: 1 AV: 1 GVMA: 1 */
143
+static void riscv_iommu_iot_inval_gscid_iova(gpointer key, gpointer value,
144
+ gpointer data)
145
{
146
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
147
RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
148
- if (iot->gscid == arg->gscid) {
149
+ if (iot->tag == arg->tag &&
150
+ iot->gscid == arg->gscid &&
151
+ iot->iova == arg->iova) {
152
iot->perm = IOMMU_NONE;
153
}
154
}
155
156
-/* GV: 0 */
157
-static void riscv_iommu_iot_inval_all(gpointer key, gpointer value,
158
- gpointer data)
159
+/* GV: 1 AV: 1 PSCV: 1 GVMA: 0 */
160
+static void riscv_iommu_iot_inval_gscid_pscid_iova(gpointer key, gpointer value,
161
+ gpointer data)
162
{
163
RISCVIOMMUEntry *iot = (RISCVIOMMUEntry *) value;
164
- iot->perm = IOMMU_NONE;
165
+ RISCVIOMMUEntry *arg = (RISCVIOMMUEntry *) data;
166
+ if (iot->tag == arg->tag &&
167
+ iot->gscid == arg->gscid &&
168
+ iot->pscid == arg->pscid &&
169
+ iot->iova == arg->iova) {
170
+ iot->perm = IOMMU_NONE;
171
+ }
172
}
173
174
/* caller should keep ref-count for iot_cache object */
175
static RISCVIOMMUEntry *riscv_iommu_iot_lookup(RISCVIOMMUContext *ctx,
176
- GHashTable *iot_cache, hwaddr iova)
177
+ GHashTable *iot_cache, hwaddr iova, RISCVIOMMUTransTag transtag)
178
{
179
RISCVIOMMUEntry key = {
180
+ .tag = transtag,
181
.gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID),
182
.pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID),
183
.iova = PPN_DOWN(iova),
184
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_iot_update(RISCVIOMMUState *s,
185
}
186
187
static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func,
188
- uint32_t gscid, uint32_t pscid, hwaddr iova)
189
+ uint32_t gscid, uint32_t pscid, hwaddr iova, RISCVIOMMUTransTag transtag)
190
{
191
GHashTable *iot_cache;
192
RISCVIOMMUEntry key = {
193
+ .tag = transtag,
194
.gscid = gscid,
195
.pscid = pscid,
196
.iova = PPN_DOWN(iova),
197
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_iot_inval(RISCVIOMMUState *s, GHFunc func,
198
g_hash_table_unref(iot_cache);
199
}
200
201
+static RISCVIOMMUTransTag riscv_iommu_get_transtag(RISCVIOMMUContext *ctx)
202
+{
203
+ uint64_t satp = get_field(ctx->satp, RISCV_IOMMU_ATP_MODE_FIELD);
204
+ uint64_t gatp = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD);
205
+
206
+ if (satp == RISCV_IOMMU_DC_FSC_MODE_BARE) {
207
+ return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
208
+ RISCV_IOMMU_TRANS_TAG_BY : RISCV_IOMMU_TRANS_TAG_VG;
209
+ } else {
210
+ return (gatp == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) ?
211
+ RISCV_IOMMU_TRANS_TAG_SS : RISCV_IOMMU_TRANS_TAG_VN;
212
+ }
213
+}
214
+
215
static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
216
IOMMUTLBEntry *iotlb, bool enable_cache)
217
{
218
+ RISCVIOMMUTransTag transtag = riscv_iommu_get_transtag(ctx);
219
RISCVIOMMUEntry *iot;
220
IOMMUAccessFlags perm;
221
bool enable_pid;
222
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
223
}
224
}
225
226
- iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova);
227
+ iot = riscv_iommu_iot_lookup(ctx, iot_cache, iotlb->iova, transtag);
228
perm = iot ? iot->perm : IOMMU_NONE;
229
if (perm != IOMMU_NONE) {
230
iotlb->translated_addr = PPN_PHYS(iot->phys);
231
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
232
iot->gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
233
iot->pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
234
iot->perm = iotlb->perm;
235
+ iot->tag = transtag;
236
riscv_iommu_iot_update(s, iot_cache, iot);
237
}
238
239
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_process_cq_tail(RISCVIOMMUState *s)
240
241
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA,
242
RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
243
- if (cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV) {
244
+ {
245
+ bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
246
+ bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
247
+ bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
248
+ uint32_t gscid = get_field(cmd.dword0,
249
+ RISCV_IOMMU_CMD_IOTINVAL_GSCID);
250
+ uint32_t pscid = get_field(cmd.dword0,
251
+ RISCV_IOMMU_CMD_IOTINVAL_PSCID);
252
+ hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
253
+
254
+ if (pscv) {
255
/* illegal command arguments IOTINVAL.GVMA & PSCV == 1 */
256
goto cmd_ill;
257
- } else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV)) {
258
- /* invalidate all cache mappings */
259
- func = riscv_iommu_iot_inval_all;
260
- } else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV)) {
261
- /* invalidate cache matching GSCID */
262
- func = riscv_iommu_iot_inval_gscid;
263
- } else {
264
- /* invalidate cache matching GSCID and ADDR (GPA) */
265
- func = riscv_iommu_iot_inval_gscid_gpa;
266
}
267
- riscv_iommu_iot_inval(s, func,
268
- get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_GSCID), 0,
269
- cmd.dword1 << 2 & TARGET_PAGE_MASK);
270
+
271
+ func = riscv_iommu_iot_inval_all;
272
+
273
+ if (gv) {
274
+ func = (av) ? riscv_iommu_iot_inval_gscid_iova :
275
+ riscv_iommu_iot_inval_gscid;
276
+ }
277
+
278
+ riscv_iommu_iot_inval(
279
+ s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VG);
280
+
281
+ riscv_iommu_iot_inval(
282
+ s, func, gscid, pscid, iova, RISCV_IOMMU_TRANS_TAG_VN);
283
break;
284
+ }
285
286
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IOTINVAL_FUNC_VMA,
287
RISCV_IOMMU_CMD_IOTINVAL_OPCODE):
288
- if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV)) {
289
- /* invalidate all cache mappings, simplified model */
290
- func = riscv_iommu_iot_inval_all;
291
- } else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV)) {
292
- /* invalidate cache matching GSCID, simplified model */
293
- func = riscv_iommu_iot_inval_gscid;
294
- } else if (!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV)) {
295
- /* invalidate cache matching GSCID and PSCID */
296
- func = riscv_iommu_iot_inval_pscid;
297
+ {
298
+ bool gv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_GV);
299
+ bool av = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_AV);
300
+ bool pscv = !!(cmd.dword0 & RISCV_IOMMU_CMD_IOTINVAL_PSCV);
301
+ uint32_t gscid = get_field(cmd.dword0,
302
+ RISCV_IOMMU_CMD_IOTINVAL_GSCID);
303
+ uint32_t pscid = get_field(cmd.dword0,
304
+ RISCV_IOMMU_CMD_IOTINVAL_PSCID);
305
+ hwaddr iova = (cmd.dword1 << 2) & TARGET_PAGE_MASK;
306
+ RISCVIOMMUTransTag transtag;
307
+
308
+ if (gv) {
309
+ transtag = RISCV_IOMMU_TRANS_TAG_VN;
310
+ if (pscv) {
311
+ func = (av) ? riscv_iommu_iot_inval_gscid_pscid_iova :
312
+ riscv_iommu_iot_inval_gscid_pscid;
313
+ } else {
314
+ func = (av) ? riscv_iommu_iot_inval_gscid_iova :
315
+ riscv_iommu_iot_inval_gscid;
316
+ }
317
} else {
318
- /* invalidate cache matching GSCID and PSCID and ADDR (IOVA) */
319
- func = riscv_iommu_iot_inval_pscid_iova;
320
+ transtag = RISCV_IOMMU_TRANS_TAG_SS;
321
+ if (pscv) {
322
+ func = (av) ? riscv_iommu_iot_inval_pscid_iova :
323
+ riscv_iommu_iot_inval_pscid;
324
+ } else {
325
+ func = (av) ? riscv_iommu_iot_inval_iova :
326
+ riscv_iommu_iot_inval_all;
327
+ }
328
}
329
- riscv_iommu_iot_inval(s, func,
330
- get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_GSCID),
331
- get_field(cmd.dword0, RISCV_IOMMU_CMD_IOTINVAL_PSCID),
332
- cmd.dword1 << 2 & TARGET_PAGE_MASK);
333
+
334
+ riscv_iommu_iot_inval(s, func, gscid, pscid, iova, transtag);
335
break;
336
+ }
337
338
case RISCV_IOMMU_CMD(RISCV_IOMMU_CMD_IODIR_FUNC_INVAL_DDT,
339
RISCV_IOMMU_CMD_IODIR_OPCODE):
340
--
59
--
341
2.47.1
60
2.48.1
diff view generated by jsdifflib
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
Since xiselect and xireg also will be of use in sxcsrind, AIA should
3
Coverity found the following issue:
4
have its own separated interface when those CSRs are accessed.
5
4
6
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
5
>>> CID 1593156: Integer handling issues (OVERFLOW_BEFORE_WIDEN)
6
>>> Potentially overflowing expression "0x10 << depth" with type
7
"int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then
8
used in a context that expects an expression of type "uint64_t" (64
9
bits, unsigned).
10
4299 depth = 16 << depth;
11
12
Fix it by forcing the expression to be 64 bits wide by using '16ULL'.
13
14
Resolves: Coverity CID 1593156
15
Fixes: c48bd18eae ("target/riscv: Add support for Control Transfer Records extension CSRs.")
16
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
17
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Atish Patra <atishp@rivosinc.com>
18
Message-ID: <20250307124602.1905754-1-dbarboza@ventanamicro.com>
9
Message-ID: <20250110-counter_delegation-v5-2-e83d797ae294@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
19
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
20
---
12
target/riscv/csr.c | 165 ++++++++++++++++++++++++++++++++++++++-------
21
target/riscv/csr.c | 2 +-
13
1 file changed, 139 insertions(+), 26 deletions(-)
22
1 file changed, 1 insertion(+), 1 deletion(-)
14
23
15
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
24
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
16
index XXXXXXX..XXXXXXX 100644
25
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/csr.c
26
--- a/target/riscv/csr.c
18
+++ b/target/riscv/csr.c
27
+++ b/target/riscv/csr.c
19
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
20
#include "system/cpu-timers.h"
21
#include "qemu/guest-random.h"
22
#include "qapi/error.h"
23
+#include <stdbool.h>
24
25
/* CSR function table public API */
26
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
27
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_any32(CPURISCVState *env, int csrno)
28
return any32(env, csrno);
29
}
30
31
+static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
32
+{
33
+ if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
34
+ return RISCV_EXCP_ILLEGAL_INST;
35
+ }
36
+
37
+ return any(env, csrno);
38
+}
39
+
40
static RISCVException smode(CPURISCVState *env, int csrno)
41
{
42
if (riscv_has_ext(env, RVS)) {
43
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
44
return smode32(env, csrno);
45
}
46
47
+static bool csrind_extensions_present(CPURISCVState *env)
48
+{
49
+ return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
50
+}
51
+
52
+static bool aia_extensions_present(CPURISCVState *env)
53
+{
54
+ return riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_ssaia;
55
+}
56
+
57
+static bool csrind_or_aia_extensions_present(CPURISCVState *env)
58
+{
59
+ return csrind_extensions_present(env) || aia_extensions_present(env);
60
+}
61
+
62
+static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
63
+{
64
+ if (!csrind_or_aia_extensions_present(env)) {
65
+ return RISCV_EXCP_ILLEGAL_INST;
66
+ }
67
+
68
+ return smode(env, csrno);
69
+}
70
+
71
static RISCVException hmode(CPURISCVState *env, int csrno)
72
{
73
if (riscv_has_ext(env, RVH)) {
74
@@ -XXX,XX +XXX,XX @@ static RISCVException hmode32(CPURISCVState *env, int csrno)
75
76
}
77
78
+static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
79
+{
80
+ if (!csrind_or_aia_extensions_present(env)) {
81
+ return RISCV_EXCP_ILLEGAL_INST;
82
+ }
83
+
84
+ return hmode(env, csrno);
85
+}
86
+
87
static RISCVException umode(CPURISCVState *env, int csrno)
88
{
89
if (riscv_has_ext(env, RVU)) {
90
@@ -XXX,XX +XXX,XX @@ static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
91
};
92
}
93
94
+static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
95
+{
96
+ if (!env->virt_enabled) {
97
+ return csrno;
98
+ }
99
+
100
+ switch (csrno) {
101
+ case CSR_SISELECT:
102
+ return CSR_VSISELECT;
103
+ case CSR_SIREG:
104
+ return CSR_VSIREG;
105
+ default:
106
+ return csrno;
107
+ };
108
+}
109
+
110
static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
111
target_ulong *val, target_ulong new_val,
112
target_ulong wr_mask)
113
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
114
target_ulong *iselect;
115
116
/* Translate CSR number for VS-mode */
117
- csrno = aia_xlate_vs_csrno(env, csrno);
118
+ csrno = csrind_xlate_vs_csrno(env, csrno);
119
120
/* Find the iselect CSR based on CSR number */
121
switch (csrno) {
122
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
123
return RISCV_EXCP_NONE;
124
}
125
126
+static bool xiselect_aia_range(target_ulong isel)
127
+{
128
+ return (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) ||
129
+ (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
130
+}
131
+
132
static int rmw_iprio(target_ulong xlen,
133
target_ulong iselect, uint8_t *iprio,
134
target_ulong *val, target_ulong new_val,
135
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
136
return 0;
137
}
138
139
-static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
140
- target_ulong *val, target_ulong new_val,
141
- target_ulong wr_mask)
142
+static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
143
+ target_ulong isel, target_ulong *val,
144
+ target_ulong new_val, target_ulong wr_mask)
145
{
146
- bool virt, isel_reserved;
147
- uint8_t *iprio;
148
+ bool virt = false, isel_reserved = false;
149
int ret = -EINVAL;
150
- target_ulong priv, isel, vgein;
151
-
152
- /* Translate CSR number for VS-mode */
153
- csrno = aia_xlate_vs_csrno(env, csrno);
154
+ uint8_t *iprio;
155
+ target_ulong priv, vgein;
156
157
- /* Decode register details from CSR number */
158
- virt = false;
159
- isel_reserved = false;
160
+ /* VS-mode CSR number passed in has already been translated */
161
switch (csrno) {
162
case CSR_MIREG:
163
+ if (!riscv_cpu_cfg(env)->ext_smaia) {
164
+ goto done;
165
+ }
166
iprio = env->miprio;
167
- isel = env->miselect;
168
priv = PRV_M;
169
break;
170
case CSR_SIREG:
171
- if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
172
+ if (!riscv_cpu_cfg(env)->ext_ssaia ||
173
+ (env->priv == PRV_S && env->mvien & MIP_SEIP &&
174
env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
175
- env->siselect <= ISELECT_IMSIC_EIE63) {
176
+ env->siselect <= ISELECT_IMSIC_EIE63)) {
177
goto done;
178
}
29
}
179
iprio = env->siprio;
30
180
- isel = env->siselect;
31
/* Update sctrstatus.WRPTR with a legal value */
181
priv = PRV_S;
32
- depth = 16 << depth;
182
break;
33
+ depth = 16ULL << depth;
183
case CSR_VSIREG:
34
env->sctrstatus =
184
+ if (!riscv_cpu_cfg(env)->ext_ssaia) {
35
env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
185
+ goto done;
186
+ }
187
iprio = env->hviprio;
188
- isel = env->vsiselect;
189
priv = PRV_S;
190
virt = true;
191
break;
192
default:
193
- goto done;
194
+ goto done;
195
};
196
197
/* Find the selected guest interrupt file */
198
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
199
}
36
}
200
201
done:
202
+ /*
203
+ * If AIA is not enabled, illegal instruction exception is always
204
+ * returned regardless of whether we are in VS-mode or not
205
+ */
206
if (ret) {
207
return (env->virt_enabled && virt && !isel_reserved) ?
208
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
209
}
210
+
211
+ return RISCV_EXCP_NONE;
212
+}
213
+
214
+static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
215
+ target_ulong *val, target_ulong new_val,
216
+ target_ulong wr_mask)
217
+{
218
+ bool virt = false;
219
+ int ret = -EINVAL;
220
+ target_ulong isel;
221
+
222
+ /* Translate CSR number for VS-mode */
223
+ csrno = csrind_xlate_vs_csrno(env, csrno);
224
+
225
+ /* Decode register details from CSR number */
226
+ switch (csrno) {
227
+ case CSR_MIREG:
228
+ isel = env->miselect;
229
+ break;
230
+ case CSR_SIREG:
231
+ isel = env->siselect;
232
+ break;
233
+ case CSR_VSIREG:
234
+ isel = env->vsiselect;
235
+ virt = true;
236
+ break;
237
+ default:
238
+ goto done;
239
+ };
240
+
241
+ if (xiselect_aia_range(isel)) {
242
+ return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
243
+ }
244
+
245
+done:
246
+ if (ret) {
247
+ return (env->virt_enabled && virt) ?
248
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
249
+ }
250
return RISCV_EXCP_NONE;
251
}
252
253
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
254
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
255
256
/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
257
- [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
258
- [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
259
+ [CSR_MISELECT] = { "miselect", csrind_or_aia_any, NULL, NULL,
260
+ rmw_xiselect },
261
+ [CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
262
+ rmw_xireg },
263
264
/* Machine-Level Interrupts (AIA) */
265
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
266
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
267
[CSR_SATP] = { "satp", satp, read_satp, write_satp },
268
269
/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
270
- [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
271
- [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
272
+ [CSR_SISELECT] = { "siselect", csrind_or_aia_smode, NULL, NULL,
273
+ rmw_xiselect },
274
+ [CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
275
+ rmw_xireg },
276
277
/* Supervisor-Level Interrupts (AIA) */
278
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
279
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
280
/*
281
* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
282
*/
283
- [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
284
- rmw_xiselect },
285
- [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
286
+ [CSR_VSISELECT] = { "vsiselect", csrind_or_aia_hmode, NULL, NULL,
287
+ rmw_xiselect },
288
+ [CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
289
+ rmw_xireg },
290
291
/* VS-Level Interrupts (H-extension with AIA) */
292
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
293
--
37
--
294
2.47.1
38
2.48.1
diff view generated by jsdifflib
1
From: Craig Blackmore <craig.blackmore@embecosm.com>
1
From: Chao Liu <lc00631@tecorigin.com>
2
2
3
Calling `vext_continuous_ldst_tlb` for load/stores up to 6 bytes
3
Some vector instructions are special, such as the vlm.v instruction,
4
significantly improves performance.
4
where setting its vl actually sets evl = (vl + 7) >> 3. To improve
5
maintainability, we will uniformly use VSTART_CHECK_EARLY_EXIT() to
6
check for the condition vstart >= vl. This function will also handle
7
cases involving evl.
5
8
6
Co-authored-by: Helene CHELIN <helene.chelin@embecosm.com>
9
Fixes: df4252b2ec ("target/riscv/vector_helpers: do early exit when
7
Co-authored-by: Paolo Savini <paolo.savini@embecosm.com>
10
vstart >= vl")
8
Co-authored-by: Craig Blackmore <craig.blackmore@embecosm.com>
11
Signed-off-by: Chao Liu <lc00631@tecorigin.com>
9
10
Signed-off-by: Helene CHELIN <helene.chelin@embecosm.com>
11
Signed-off-by: Paolo Savini <paolo.savini@embecosm.com>
12
Signed-off-by: Craig Blackmore <craig.blackmore@embecosm.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
13
Message-ID: <f575979874e323a9e0da7796aa391c7d87e56f88.1741573286.git.lc00631@tecorigin.com>
15
Message-ID: <20241218142353.1027938-3-craig.blackmore@embecosm.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
15
---
18
target/riscv/vector_helper.c | 16 ++++++++++++++++
16
target/riscv/vector_internals.h | 12 +++---
19
1 file changed, 16 insertions(+)
17
target/riscv/vcrypto_helper.c | 32 +++++++--------
18
target/riscv/vector_helper.c | 69 ++++++++++++++++-----------------
19
target/riscv/vector_internals.c | 4 +-
20
4 files changed, 57 insertions(+), 60 deletions(-)
20
21
22
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/vector_internals.h
25
+++ b/target/riscv/vector_internals.h
26
@@ -XXX,XX +XXX,XX @@
27
#include "tcg/tcg-gvec-desc.h"
28
#include "internals.h"
29
30
-#define VSTART_CHECK_EARLY_EXIT(env) do { \
31
- if (env->vstart >= env->vl) { \
32
- env->vstart = 0; \
33
- return; \
34
- } \
35
+#define VSTART_CHECK_EARLY_EXIT(env, vl) do { \
36
+ if (env->vstart >= vl) { \
37
+ env->vstart = 0; \
38
+ return; \
39
+ } \
40
} while (0)
41
42
static inline uint32_t vext_nf(uint32_t desc)
43
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
44
uint32_t vma = vext_vma(desc); \
45
uint32_t i; \
46
\
47
- VSTART_CHECK_EARLY_EXIT(env); \
48
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
49
\
50
for (i = env->vstart; i < vl; i++) { \
51
if (!vm && !vext_elem_mask(v0, i)) { \
52
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/riscv/vcrypto_helper.c
55
+++ b/target/riscv/vcrypto_helper.c
56
@@ -XXX,XX +XXX,XX @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
57
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
58
uint32_t vta = vext_vta(desc); \
59
\
60
- VSTART_CHECK_EARLY_EXIT(env); \
61
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
62
\
63
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
64
AESState round_key; \
65
@@ -XXX,XX +XXX,XX @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
66
uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
67
uint32_t vta = vext_vta(desc); \
68
\
69
- VSTART_CHECK_EARLY_EXIT(env); \
70
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
71
\
72
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
73
AESState round_key; \
74
@@ -XXX,XX +XXX,XX @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
75
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
76
uint32_t vta = vext_vta(desc);
77
78
- VSTART_CHECK_EARLY_EXIT(env);
79
+ VSTART_CHECK_EARLY_EXIT(env, vl);
80
81
uimm &= 0b1111;
82
if (uimm > 10 || uimm == 0) {
83
@@ -XXX,XX +XXX,XX @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
84
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
85
uint32_t vta = vext_vta(desc);
86
87
- VSTART_CHECK_EARLY_EXIT(env);
88
+ VSTART_CHECK_EARLY_EXIT(env, vl);
89
90
uimm &= 0b1111;
91
if (uimm > 14 || uimm < 2) {
92
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
93
uint32_t total_elems;
94
uint32_t vta = vext_vta(desc);
95
96
- VSTART_CHECK_EARLY_EXIT(env);
97
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
98
99
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
100
if (sew == MO_32) {
101
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
102
uint32_t total_elems;
103
uint32_t vta = vext_vta(desc);
104
105
- VSTART_CHECK_EARLY_EXIT(env);
106
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
107
108
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
109
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
110
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
111
uint32_t total_elems;
112
uint32_t vta = vext_vta(desc);
113
114
- VSTART_CHECK_EARLY_EXIT(env);
115
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
116
117
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
118
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
119
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
120
uint32_t total_elems;
121
uint32_t vta = vext_vta(desc);
122
123
- VSTART_CHECK_EARLY_EXIT(env);
124
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
125
126
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
127
vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
128
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
129
uint32_t total_elems;
130
uint32_t vta = vext_vta(desc);
131
132
- VSTART_CHECK_EARLY_EXIT(env);
133
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
134
135
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
136
vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
137
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
138
uint32_t *vs1 = vs1_vptr;
139
uint32_t *vs2 = vs2_vptr;
140
141
- VSTART_CHECK_EARLY_EXIT(env);
142
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
143
144
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
145
uint32_t w[24];
146
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
147
uint32_t *vs2 = vs2_vptr;
148
uint32_t v1[8], v2[8], v3[8];
149
150
- VSTART_CHECK_EARLY_EXIT(env);
151
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
152
153
for (int i = env->vstart / 8; i < env->vl / 8; i++) {
154
for (int k = 0; k < 8; k++) {
155
@@ -XXX,XX +XXX,XX @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
156
uint32_t vta = vext_vta(desc);
157
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
158
159
- VSTART_CHECK_EARLY_EXIT(env);
160
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
161
162
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
163
uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
164
@@ -XXX,XX +XXX,XX @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
165
uint32_t vta = vext_vta(desc);
166
uint32_t total_elems = vext_get_total_elems(env, desc, 4);
167
168
- VSTART_CHECK_EARLY_EXIT(env);
169
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
170
171
for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
172
uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
173
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
174
uint32_t esz = sizeof(uint32_t);
175
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
176
177
- VSTART_CHECK_EARLY_EXIT(env);
178
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
179
180
for (uint32_t i = group_start; i < group_end; ++i) {
181
uint32_t vstart = i * egs;
182
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
183
uint32_t esz = sizeof(uint32_t);
184
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
185
186
- VSTART_CHECK_EARLY_EXIT(env);
187
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
188
189
for (uint32_t i = group_start; i < group_end; ++i) {
190
uint32_t vstart = i * egs;
191
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
192
uint32_t esz = sizeof(uint32_t);
193
uint32_t total_elems = vext_get_total_elems(env, desc, esz);
194
195
- VSTART_CHECK_EARLY_EXIT(env);
196
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
197
198
for (uint32_t i = group_start; i < group_end; ++i) {
199
uint32_t vstart = i * egs;
21
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
200
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
22
index XXXXXXX..XXXXXXX 100644
201
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/vector_helper.c
202
--- a/target/riscv/vector_helper.c
24
+++ b/target/riscv/vector_helper.c
203
+++ b/target/riscv/vector_helper.c
204
@@ -XXX,XX +XXX,XX @@ vext_ldst_stride(void *vd, void *v0, target_ulong base, target_ulong stride,
205
uint32_t esz = 1 << log2_esz;
206
uint32_t vma = vext_vma(desc);
207
208
- VSTART_CHECK_EARLY_EXIT(env);
209
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
210
211
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
212
k = 0;
25
@@ -XXX,XX +XXX,XX @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
213
@@ -XXX,XX +XXX,XX @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
26
return;
214
uint32_t msize = nf * esz;
27
}
215
int mmu_index = riscv_env_mmu_index(env, false);
28
216
29
+#if defined(CONFIG_USER_ONLY)
217
- if (env->vstart >= evl) {
30
+ /*
218
- env->vstart = 0;
31
+ * For data sizes <= 6 bytes we get better performance by simply calling
219
- return;
32
+ * vext_continuous_ldst_tlb
220
- }
33
+ */
221
+ VSTART_CHECK_EARLY_EXIT(env, evl);
34
+ if (nf == 1 && (evl << log2_esz) <= 6) {
222
35
+ addr = base + (env->vstart << log2_esz);
223
#if defined(CONFIG_USER_ONLY)
36
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart, ra,
224
/*
37
+ esz, is_load);
225
@@ -XXX,XX +XXX,XX @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
38
+
226
uint32_t esz = 1 << log2_esz;
39
+ env->vstart = 0;
227
uint32_t vma = vext_vma(desc);
40
+ vext_set_tail_elems_1s(evl, vd, desc, nf, esz, max_elems);
228
41
+ return;
229
- VSTART_CHECK_EARLY_EXIT(env);
42
+ }
230
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
43
+#endif
231
44
+
232
/* load bytes from guest memory */
45
/* Calculate the page range of first page */
233
for (i = env->vstart; i < env->vl; env->vstart = ++i) {
234
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base, CPURISCVState *env,
235
int flags;
236
void *host;
237
238
- VSTART_CHECK_EARLY_EXIT(env);
239
+ VSTART_CHECK_EARLY_EXIT(env, env->vl);
240
46
addr = base + ((env->vstart * nf) << log2_esz);
241
addr = base + ((env->vstart * nf) << log2_esz);
47
page_split = -(addr | TARGET_PAGE_MASK);
242
page_split = -(addr | TARGET_PAGE_MASK);
243
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
244
uint32_t vta = vext_vta(desc); \
245
uint32_t i; \
246
\
247
- VSTART_CHECK_EARLY_EXIT(env); \
248
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
249
\
250
for (i = env->vstart; i < vl; i++) { \
251
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
252
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
253
uint32_t vta = vext_vta(desc); \
254
uint32_t i; \
255
\
256
- VSTART_CHECK_EARLY_EXIT(env); \
257
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
258
\
259
for (i = env->vstart; i < vl; i++) { \
260
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
261
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
262
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
263
uint32_t i; \
264
\
265
- VSTART_CHECK_EARLY_EXIT(env); \
266
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
267
\
268
for (i = env->vstart; i < vl; i++) { \
269
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
270
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
271
uint32_t vta_all_1s = vext_vta_all_1s(desc); \
272
uint32_t i; \
273
\
274
- VSTART_CHECK_EARLY_EXIT(env); \
275
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
276
\
277
for (i = env->vstart; i < vl; i++) { \
278
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
279
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
280
uint32_t vma = vext_vma(desc); \
281
uint32_t i; \
282
\
283
- VSTART_CHECK_EARLY_EXIT(env); \
284
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
285
\
286
for (i = env->vstart; i < vl; i++) { \
287
if (!vm && !vext_elem_mask(v0, i)) { \
288
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
289
uint32_t vma = vext_vma(desc); \
290
uint32_t i; \
291
\
292
- VSTART_CHECK_EARLY_EXIT(env); \
293
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
294
\
295
for (i = env->vstart; i < vl; i++) { \
296
if (!vm && !vext_elem_mask(v0, i)) { \
297
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
298
uint32_t vma = vext_vma(desc); \
299
uint32_t i; \
300
\
301
- VSTART_CHECK_EARLY_EXIT(env); \
302
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
303
\
304
for (i = env->vstart; i < vl; i++) { \
305
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
306
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
307
uint32_t vma = vext_vma(desc); \
308
uint32_t i; \
309
\
310
- VSTART_CHECK_EARLY_EXIT(env); \
311
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
312
\
313
for (i = env->vstart; i < vl; i++) { \
314
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
315
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
316
uint32_t vta = vext_vta(desc); \
317
uint32_t i; \
318
\
319
- VSTART_CHECK_EARLY_EXIT(env); \
320
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
321
\
322
for (i = env->vstart; i < vl; i++) { \
323
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
324
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
325
uint32_t vta = vext_vta(desc); \
326
uint32_t i; \
327
\
328
- VSTART_CHECK_EARLY_EXIT(env); \
329
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
330
\
331
for (i = env->vstart; i < vl; i++) { \
332
*((ETYPE *)vd + H(i)) = (ETYPE)s1; \
333
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
334
uint32_t vta = vext_vta(desc); \
335
uint32_t i; \
336
\
337
- VSTART_CHECK_EARLY_EXIT(env); \
338
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
339
\
340
for (i = env->vstart; i < vl; i++) { \
341
ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
342
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
343
uint32_t vta = vext_vta(desc); \
344
uint32_t i; \
345
\
346
- VSTART_CHECK_EARLY_EXIT(env); \
347
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
348
\
349
for (i = env->vstart; i < vl; i++) { \
350
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
351
@@ -XXX,XX +XXX,XX @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
352
uint32_t vl, uint32_t vm, int vxrm,
353
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
354
{
355
- VSTART_CHECK_EARLY_EXIT(env);
356
+ VSTART_CHECK_EARLY_EXIT(env, vl);
357
358
for (uint32_t i = env->vstart; i < vl; i++) {
359
if (!vm && !vext_elem_mask(v0, i)) {
360
@@ -XXX,XX +XXX,XX @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
361
uint32_t vl, uint32_t vm, int vxrm,
362
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
363
{
364
- VSTART_CHECK_EARLY_EXIT(env);
365
+ VSTART_CHECK_EARLY_EXIT(env, vl);
366
367
for (uint32_t i = env->vstart; i < vl; i++) {
368
if (!vm && !vext_elem_mask(v0, i)) {
369
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
370
uint32_t vma = vext_vma(desc); \
371
uint32_t i; \
372
\
373
- VSTART_CHECK_EARLY_EXIT(env); \
374
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
375
\
376
for (i = env->vstart; i < vl; i++) { \
377
if (!vm && !vext_elem_mask(v0, i)) { \
378
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
379
uint32_t vma = vext_vma(desc); \
380
uint32_t i; \
381
\
382
- VSTART_CHECK_EARLY_EXIT(env); \
383
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
384
\
385
for (i = env->vstart; i < vl; i++) { \
386
if (!vm && !vext_elem_mask(v0, i)) { \
387
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
388
uint32_t vma = vext_vma(desc); \
389
uint32_t i; \
390
\
391
- VSTART_CHECK_EARLY_EXIT(env); \
392
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
393
\
394
if (vl == 0) { \
395
return; \
396
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
397
uint32_t vma = vext_vma(desc); \
398
uint32_t i; \
399
\
400
- VSTART_CHECK_EARLY_EXIT(env); \
401
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
402
\
403
for (i = env->vstart; i < vl; i++) { \
404
ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
405
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
406
uint32_t vma = vext_vma(desc); \
407
uint32_t i; \
408
\
409
- VSTART_CHECK_EARLY_EXIT(env); \
410
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
411
\
412
for (i = env->vstart; i < vl; i++) { \
413
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
414
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
415
uint32_t vta = vext_vta(desc); \
416
uint32_t i; \
417
\
418
- VSTART_CHECK_EARLY_EXIT(env); \
419
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
420
\
421
for (i = env->vstart; i < vl; i++) { \
422
ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
423
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
424
uint32_t i; \
425
int a, b; \
426
\
427
- VSTART_CHECK_EARLY_EXIT(env); \
428
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
429
\
430
for (i = env->vstart; i < vl; i++) { \
431
a = vext_elem_mask(vs1, i); \
432
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
433
uint32_t vma = vext_vma(desc); \
434
int i; \
435
\
436
- VSTART_CHECK_EARLY_EXIT(env); \
437
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
438
\
439
for (i = env->vstart; i < vl; i++) { \
440
if (!vm && !vext_elem_mask(v0, i)) { \
441
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
442
uint32_t vma = vext_vma(desc); \
443
target_ulong offset = s1, i_min, i; \
444
\
445
- VSTART_CHECK_EARLY_EXIT(env); \
446
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
447
\
448
i_min = MAX(env->vstart, offset); \
449
for (i = i_min; i < vl; i++) { \
450
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
451
uint32_t vma = vext_vma(desc); \
452
target_ulong i_max, i_min, i; \
453
\
454
- VSTART_CHECK_EARLY_EXIT(env); \
455
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
456
\
457
i_min = MIN(s1 < vlmax ? vlmax - s1 : 0, vl); \
458
i_max = MAX(i_min, env->vstart); \
459
@@ -XXX,XX +XXX,XX @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
460
uint32_t vma = vext_vma(desc); \
461
uint32_t i; \
462
\
463
- VSTART_CHECK_EARLY_EXIT(env); \
464
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
465
\
466
for (i = env->vstart; i < vl; i++) { \
467
if (!vm && !vext_elem_mask(v0, i)) { \
468
@@ -XXX,XX +XXX,XX @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
469
uint32_t vma = vext_vma(desc); \
470
uint32_t i; \
471
\
472
- VSTART_CHECK_EARLY_EXIT(env); \
473
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
474
\
475
for (i = env->vstart; i < vl; i++) { \
476
if (!vm && !vext_elem_mask(v0, i)) { \
477
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
478
uint64_t index; \
479
uint32_t i; \
480
\
481
- VSTART_CHECK_EARLY_EXIT(env); \
482
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
483
\
484
for (i = env->vstart; i < vl; i++) { \
485
if (!vm && !vext_elem_mask(v0, i)) { \
486
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
487
uint64_t index = s1; \
488
uint32_t i; \
489
\
490
- VSTART_CHECK_EARLY_EXIT(env); \
491
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
492
\
493
for (i = env->vstart; i < vl; i++) { \
494
if (!vm && !vext_elem_mask(v0, i)) { \
495
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
496
uint32_t vma = vext_vma(desc); \
497
uint32_t i; \
498
\
499
- VSTART_CHECK_EARLY_EXIT(env); \
500
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
501
\
502
for (i = env->vstart; i < vl; i++) { \
503
if (!vm && !vext_elem_mask(v0, i)) { \
504
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
505
index XXXXXXX..XXXXXXX 100644
506
--- a/target/riscv/vector_internals.c
507
+++ b/target/riscv/vector_internals.c
508
@@ -XXX,XX +XXX,XX @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
509
uint32_t vma = vext_vma(desc);
510
uint32_t i;
511
512
- VSTART_CHECK_EARLY_EXIT(env);
513
+ VSTART_CHECK_EARLY_EXIT(env, vl);
514
515
for (i = env->vstart; i < vl; i++) {
516
if (!vm && !vext_elem_mask(v0, i)) {
517
@@ -XXX,XX +XXX,XX @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
518
uint32_t vma = vext_vma(desc);
519
uint32_t i;
520
521
- VSTART_CHECK_EARLY_EXIT(env);
522
+ VSTART_CHECK_EARLY_EXIT(env, vl);
523
524
for (i = env->vstart; i < vl; i++) {
525
if (!vm && !vext_elem_mask(v0, i)) {
48
--
526
--
49
2.47.1
527
2.48.1
diff view generated by jsdifflib
1
From: Craig Blackmore <craig.blackmore@embecosm.com>
1
From: Chao Liu <lc00631@tecorigin.com>
2
2
3
Replace `continus` with `continuous`.
3
Recently, when I was writing a RISCV test, I found that when VL is set to 0, the
4
instruction should be nop, but when I tested it, I found that QEMU will treat
5
all elements as tail elements, and in the case of VTA=1, write all elements
6
to 1.
4
7
5
Signed-off-by: Craig Blackmore <craig.blackmore@embecosm.com>
8
After troubleshooting, it was found that the vext_vx_rm_1 function was called in
9
the vext_vx_rm_2, and then the vext_set_elems_1s function was called to process
10
the tail element, but only VSTART >= vl was checked in the vext_vx_rm_1
11
function, which caused the tail element to still be processed even if it was
12
returned in advance.
13
14
So I've made the following change:
15
16
Put VSTART_CHECK_EARLY_EXIT(env) at the beginning of the vext_vx_rm_2 function,
17
so that the VSTART register is checked correctly.
18
19
Fixes: df4252b2ec ("target/riscv/vector_helpers: do early exit when
20
vstart >= vl")
21
Signed-off-by: Chao Liu <lc00631@tecorigin.com>
6
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
22
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Reviewed-by: Max Chou <max.chou@sifive.com>
23
Message-ID: <b2649f14915150be4c602d63cd3ea4adf47e9d75.1741573286.git.lc00631@tecorigin.com>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
9
Message-ID: <20241218142353.1027938-2-craig.blackmore@embecosm.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
25
---
12
target/riscv/vector_helper.c | 10 +++++-----
26
target/riscv/vector_helper.c | 18 ++++++++++++++----
13
1 file changed, 5 insertions(+), 5 deletions(-)
27
1 file changed, 14 insertions(+), 4 deletions(-)
14
28
15
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
29
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
16
index XXXXXXX..XXXXXXX 100644
30
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/vector_helper.c
31
--- a/target/riscv/vector_helper.c
18
+++ b/target/riscv/vector_helper.c
32
+++ b/target/riscv/vector_helper.c
19
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_ELEM(ste_w, uint32_t, H4, stl)
33
@@ -XXX,XX +XXX,XX @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
20
GEN_VEXT_ST_ELEM(ste_d, uint64_t, H8, stq)
34
uint32_t vl, uint32_t vm, int vxrm,
21
35
opivv2_rm_fn *fn, uint32_t vma, uint32_t esz)
22
static inline QEMU_ALWAYS_INLINE void
23
-vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
24
+vext_continuous_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
25
void *vd, uint32_t evl, target_ulong addr,
26
uint32_t reg_start, uintptr_t ra, uint32_t esz,
27
bool is_load)
28
@@ -XXX,XX +XXX,XX @@ vext_continus_ldst_tlb(CPURISCVState *env, vext_ldst_elem_fn_tlb *ldst_tlb,
29
}
30
31
static inline QEMU_ALWAYS_INLINE void
32
-vext_continus_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
33
+vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host,
34
void *vd, uint32_t evl, uint32_t reg_start, void *host,
35
uint32_t esz, bool is_load)
36
{
36
{
37
@@ -XXX,XX +XXX,XX @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
37
- VSTART_CHECK_EARLY_EXIT(env, vl);
38
38
-
39
if (flags == 0) {
39
for (uint32_t i = env->vstart; i < vl; i++) {
40
if (nf == 1) {
40
if (!vm && !vext_elem_mask(v0, i)) {
41
- vext_continus_ldst_host(env, ldst_host, vd, evl, env->vstart, host,
41
/* set masked-off elements to 1s */
42
- esz, is_load);
42
@@ -XXX,XX +XXX,XX @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
43
+ vext_continuous_ldst_host(env, ldst_host, vd, evl, env->vstart,
43
uint32_t vta = vext_vta(desc);
44
+ host, esz, is_load);
44
uint32_t vma = vext_vma(desc);
45
} else {
45
46
for (i = env->vstart; i < evl; ++i) {
46
+ VSTART_CHECK_EARLY_EXIT(env, vl);
47
k = 0;
47
+
48
@@ -XXX,XX +XXX,XX @@ vext_page_ldst_us(CPURISCVState *env, void *vd, target_ulong addr,
48
switch (env->vxrm) {
49
env->vstart += elems;
49
case 0: /* rnu */
50
} else {
50
vext_vv_rm_1(vd, v0, vs1, vs2,
51
if (nf == 1) {
51
@@ -XXX,XX +XXX,XX @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
52
- vext_continus_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
52
uint32_t vl, uint32_t vm, int vxrm,
53
+ vext_continuous_ldst_tlb(env, ldst_tlb, vd, evl, addr, env->vstart,
53
opivx2_rm_fn *fn, uint32_t vma, uint32_t esz)
54
ra, esz, is_load);
54
{
55
} else {
55
- VSTART_CHECK_EARLY_EXIT(env, vl);
56
/* load bytes from guest memory */
56
-
57
for (uint32_t i = env->vstart; i < vl; i++) {
58
if (!vm && !vext_elem_mask(v0, i)) {
59
/* set masked-off elements to 1s */
60
@@ -XXX,XX +XXX,XX @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
61
uint32_t vta = vext_vta(desc);
62
uint32_t vma = vext_vma(desc);
63
64
+ VSTART_CHECK_EARLY_EXIT(env, vl);
65
+
66
switch (env->vxrm) {
67
case 0: /* rnu */
68
vext_vx_rm_1(vd, v0, s1, vs2,
69
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
70
uint32_t i; \
71
TD s1 = *((TD *)vs1 + HD(0)); \
72
\
73
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
74
+ \
75
for (i = env->vstart; i < vl; i++) { \
76
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
77
if (!vm && !vext_elem_mask(v0, i)) { \
78
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
79
uint32_t i; \
80
TD s1 = *((TD *)vs1 + HD(0)); \
81
\
82
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
83
+ \
84
for (i = env->vstart; i < vl; i++) { \
85
TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
86
if (!vm && !vext_elem_mask(v0, i)) { \
87
@@ -XXX,XX +XXX,XX @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
88
int i;
89
bool first_mask_bit = false;
90
91
+ VSTART_CHECK_EARLY_EXIT(env, vl);
92
+
93
for (i = env->vstart; i < vl; i++) {
94
if (!vm && !vext_elem_mask(v0, i)) {
95
/* set masked-off elements to 1s */
96
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
97
uint32_t sum = 0; \
98
int i; \
99
\
100
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
101
+ \
102
for (i = env->vstart; i < vl; i++) { \
103
if (!vm && !vext_elem_mask(v0, i)) { \
104
/* set masked-off elements to 1s */ \
105
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
106
uint32_t vta = vext_vta(desc); \
107
uint32_t num = 0, i; \
108
\
109
+ VSTART_CHECK_EARLY_EXIT(env, vl); \
110
+ \
111
for (i = env->vstart; i < vl; i++) { \
112
if (!vext_elem_mask(vs1, i)) { \
113
continue; \
57
--
114
--
58
2.47.1
115
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Yanfeng Liu <yfliu2008@qq.com>
2
1
3
This adds virtualization mode (V bit) as bit(2) of register `priv`
4
per RiscV debug spec v1.0.0-rc4. Checked with gdb-multiarch v12.1.
5
6
Note that GDB may display `INVALID` tag for `priv` reg when V bit
7
is set, this doesn't affect actual access to the bit though.
8
9
Signed-off-by: Yanfeng Liu <yfliu2008@qq.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-ID: <tencent_1993B55C24DE7979BF34B200F78287002907@qq.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
target/riscv/gdbstub.c | 23 +++++++++++++++++++----
15
1 file changed, 19 insertions(+), 4 deletions(-)
16
17
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/gdbstub.c
20
+++ b/target/riscv/gdbstub.c
21
@@ -XXX,XX +XXX,XX @@ static int riscv_gdb_get_virtual(CPUState *cs, GByteArray *buf, int n)
22
RISCVCPU *cpu = RISCV_CPU(cs);
23
CPURISCVState *env = &cpu->env;
24
25
- return gdb_get_regl(buf, env->priv);
26
+ /* Per RiscV debug spec v1.0.0 rc4 */
27
+ target_ulong vbit = (env->virt_enabled) ? BIT(2) : 0;
28
+
29
+ return gdb_get_regl(buf, env->priv | vbit);
30
#endif
31
}
32
return 0;
33
@@ -XXX,XX +XXX,XX @@ static int riscv_gdb_set_virtual(CPUState *cs, uint8_t *mem_buf, int n)
34
RISCVCPU *cpu = RISCV_CPU(cs);
35
CPURISCVState *env = &cpu->env;
36
37
- env->priv = ldtul_p(mem_buf) & 0x3;
38
- if (env->priv == PRV_RESERVED) {
39
- env->priv = PRV_S;
40
+ target_ulong new_priv = ldtul_p(mem_buf) & 0x3;
41
+ bool new_virt = 0;
42
+
43
+ if (new_priv == PRV_RESERVED) {
44
+ new_priv = PRV_S;
45
+ }
46
+
47
+ if (new_priv != PRV_M) {
48
+ new_virt = (ldtul_p(mem_buf) & BIT(2)) >> 2;
49
}
50
+
51
+ if (riscv_has_ext(env, RVH) && new_virt != env->virt_enabled) {
52
+ riscv_cpu_swap_hypervisor_regs(env);
53
+ }
54
+
55
+ riscv_cpu_set_mode(env, new_priv, new_virt);
56
#endif
57
return sizeof(target_ulong);
58
}
59
--
60
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shcounterenw is defined in RVA22 as:
4
5
"For any hpmcounter that is not read-only zero, the corresponding bit in
6
hcounteren must be writable."
7
8
This is always true in TCG so let's claim support for it.
9
10
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20241218114026.1652352-4-dbarboza@ventanamicro.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/cpu.c | 1 +
16
tests/data/acpi/riscv64/virt/RHCT | Bin 332 -> 346 bytes
17
2 files changed, 1 insertion(+)
18
19
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.c
22
+++ b/target/riscv/cpu.c
23
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
24
ISA_EXT_DATA_ENTRY(zvkt, PRIV_VERSION_1_12_0, ext_zvkt),
25
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
26
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
27
+ ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
28
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
29
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
30
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
31
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
32
index XXXXXXX..XXXXXXX 100644
33
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
34
--
35
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shvstvala is defined in RVA22 as:
4
5
"vstval must be written in all cases described above for stval."
6
7
By "cases describe above" the doc refer to the description of sstvala:
8
9
"stval must be written with the faulting virtual address for load,
10
store, and instruction page-fault, access-fault, and misaligned
11
exceptions, and for breakpoint exceptions other than those caused by
12
execution of the EBREAK or C.EBREAK instructions. For
13
virtual-instruction and illegal-instruction exceptions, stval must be
14
written with the faulting instruction."
15
16
We already have sstvala, and our vstval follows the same rules as stval,
17
so we can claim to support shvstvala too.
18
19
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
20
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
21
Message-ID: <20241218114026.1652352-5-dbarboza@ventanamicro.com>
22
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
23
---
24
target/riscv/cpu.c | 1 +
25
tests/data/acpi/riscv64/virt/RHCT | Bin 346 -> 356 bytes
26
2 files changed, 1 insertion(+)
27
28
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/riscv/cpu.c
31
+++ b/target/riscv/cpu.c
32
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
33
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
34
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
35
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
36
+ ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
37
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
38
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
39
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
40
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
41
index XXXXXXX..XXXXXXX 100644
42
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
43
--
44
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shtvala is described in RVA22 as:
4
5
"htval must be written with the faulting guest physical address
6
in all circumstances permitted by the ISA."
7
8
This is the case since commit 3067553993, so claim support for shtvala.
9
10
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20241218114026.1652352-6-dbarboza@ventanamicro.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/cpu.c | 1 +
16
tests/data/acpi/riscv64/virt/RHCT | Bin 356 -> 364 bytes
17
2 files changed, 1 insertion(+)
18
19
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.c
22
+++ b/target/riscv/cpu.c
23
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
24
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
25
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
26
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
27
+ ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
28
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
29
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
30
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
31
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
32
index XXXXXXX..XXXXXXX 100644
33
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
34
--
35
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shvstvecd is defined in RVA22 as:
4
5
"vstvec.MODE must be capable of holding the value 0 (Direct).
6
When vstvec.MODE=Direct, vstvec.BASE must be capable of holding any
7
valid four-byte-aligned address."
8
9
This is always true for TCG so let's claim support for it.
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20241218114026.1652352-7-dbarboza@ventanamicro.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
target/riscv/cpu.c | 1 +
17
tests/data/acpi/riscv64/virt/RHCT | Bin 364 -> 374 bytes
18
2 files changed, 1 insertion(+)
19
20
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/cpu.c
23
+++ b/target/riscv/cpu.c
24
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
25
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
26
ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
27
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
28
+ ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
29
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
30
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
31
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
32
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
33
index XXXXXXX..XXXXXXX 100644
34
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
35
--
36
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shvsatpa is defined in RVA22 as:
4
5
"All translation modes supported in satp must be supported in vsatp."
6
7
This is always true in TCG so let's claim support for it.
8
9
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Message-ID: <20241218114026.1652352-8-dbarboza@ventanamicro.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
target/riscv/cpu.c | 1 +
15
tests/data/acpi/riscv64/virt/RHCT | Bin 374 -> 382 bytes
16
2 files changed, 1 insertion(+)
17
18
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu.c
21
+++ b/target/riscv/cpu.c
22
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
23
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
24
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
25
ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
26
+ ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
27
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
28
ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
29
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
30
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
31
index XXXXXXX..XXXXXXX 100644
32
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
33
--
34
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
shgatpa is defined in RVA22 as:
4
5
"For each supported virtual memory scheme SvNN supported in satp, the
6
corresponding hgatp SvNNx4 mode must be supported. The hgatp mode Bare
7
must also be supported."
8
9
Claim support for shgatpa since this is always true for TCG.
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20241218114026.1652352-9-dbarboza@ventanamicro.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
target/riscv/cpu.c | 1 +
17
tests/data/acpi/riscv64/virt/RHCT | Bin 382 -> 390 bytes
18
2 files changed, 1 insertion(+)
19
20
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/cpu.c
23
+++ b/target/riscv/cpu.c
24
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
25
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
26
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
27
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
28
+ ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
29
ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
30
ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
31
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
32
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
33
index XXXXXXX..XXXXXXX 100644
34
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
35
--
36
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
'sha' is the augmented hypervisor extension, defined in RVA22 as a set of
4
the following extensions:
5
6
- RVH
7
- Ssstateen
8
- Shcounterenw (always present)
9
- Shvstvala (always present)
10
- Shtvala (always present)
11
- Shvstvecd (always present)
12
- Shvsatpa (always present)
13
- Shgatpa (always present)
14
15
We can claim support for 'sha' by checking if we have RVH and ssstateen.
16
17
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
18
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20241218114026.1652352-10-dbarboza@ventanamicro.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
22
target/riscv/cpu_cfg.h | 1 +
23
target/riscv/cpu.c | 2 ++
24
target/riscv/tcg/tcg-cpu.c | 8 ++++++++
25
3 files changed, 11 insertions(+)
26
27
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
28
index XXXXXXX..XXXXXXX 100644
29
--- a/target/riscv/cpu_cfg.h
30
+++ b/target/riscv/cpu_cfg.h
31
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
32
bool ext_svade;
33
bool ext_zic64b;
34
bool ext_ssstateen;
35
+ bool ext_sha;
36
37
/*
38
* Always 'true' booleans for named features
39
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/target/riscv/cpu.c
42
+++ b/target/riscv/cpu.c
43
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
44
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
45
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
46
ISA_EXT_DATA_ENTRY(shcounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
47
+ ISA_EXT_DATA_ENTRY(sha, PRIV_VERSION_1_12_0, ext_sha),
48
ISA_EXT_DATA_ENTRY(shgatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
49
ISA_EXT_DATA_ENTRY(shtvala, PRIV_VERSION_1_12_0, has_priv_1_12),
50
ISA_EXT_DATA_ENTRY(shvsatpa, PRIV_VERSION_1_12_0, has_priv_1_12),
51
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = {
52
const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
53
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
54
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
55
+ MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
56
57
{ },
58
};
59
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/riscv/tcg/tcg-cpu.c
62
+++ b/target/riscv/tcg/tcg-cpu.c
63
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
64
cpu->cfg.cbop_blocksize = 64;
65
cpu->cfg.cboz_blocksize = 64;
66
break;
67
+ case CPU_CFG_OFFSET(ext_sha):
68
+ if (!cpu_misa_ext_is_user_set(RVH)) {
69
+ riscv_cpu_write_misa_bit(cpu, RVH, true);
70
+ }
71
+ /* fallthrough */
72
case CPU_CFG_OFFSET(ext_ssstateen):
73
cpu->cfg.ext_smstateen = true;
74
break;
75
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
76
cpu->cfg.cboz_blocksize == 64;
77
78
cpu->cfg.ext_ssstateen = cpu->cfg.ext_smstateen;
79
+
80
+ cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
81
+ cpu->cfg.ext_ssstateen;
82
}
83
84
static void riscv_cpu_validate_g(RISCVCPU *cpu)
85
--
86
2.47.1
diff view generated by jsdifflib
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
1
From: Richard Henderson <richard.henderson@linaro.org>
2
2
3
The Smcdeleg/Ssccfg adds the support for counter delegation via
3
The third argument of the syscall contains the size of the
4
S*indcsr and Ssccfg.
4
cpu mask in bytes, not bits. Nor is the size rounded up to
5
a multiple of sizeof(abi_ulong).
5
6
6
It also adds a new shadow CSR scountinhibit and menvcfg enable bit (CDE)
7
Cc: qemu-stable@nongnu.org
7
to enable this extension and scountovf virtualization.
8
Reported-by: Andreas Schwab <schwab@suse.de>
8
9
Fixes: 9e1c7d982d7 ("linux-user/riscv: Add syscall riscv_hwprobe")
9
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
10
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
10
Co-developed-by: Atish Patra <atishp@rivosinc.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-ID: <20250308225902.1208237-3-richard.henderson@linaro.org>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Atish Patra <atishp@rivosinc.com>
14
Message-ID: <20250110-counter_delegation-v5-8-e83d797ae294@rivosinc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
14
---
17
target/riscv/csr.c | 304 +++++++++++++++++++++++++++++++++++++++++++--
15
linux-user/syscall.c | 55 +++++++++++++++++++++++---------------------
18
1 file changed, 292 insertions(+), 12 deletions(-)
16
1 file changed, 29 insertions(+), 26 deletions(-)
19
17
20
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
18
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
21
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/csr.c
20
--- a/linux-user/syscall.c
23
+++ b/target/riscv/csr.c
21
+++ b/linux-user/syscall.c
24
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
22
@@ -XXX,XX +XXX,XX @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
25
return smode32(env, csrno);
23
}
26
}
24
}
27
25
28
+static RISCVException scountinhibit_pred(CPURISCVState *env, int csrno)
26
-static int cpu_set_valid(abi_long arg3, abi_long arg4)
29
+{
27
+/*
30
+ RISCVCPU *cpu = env_archcpu(env);
28
+ * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
31
+
29
+ * If the cpumast_t has no bits set: -EINVAL.
32
+ if (!cpu->cfg.ext_ssccfg || !cpu->cfg.ext_smcdeleg) {
30
+ * Otherwise the cpumask_t contains some bit set: 0.
33
+ return RISCV_EXCP_ILLEGAL_INST;
31
+ * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
34
+ }
32
+ * nor bound the search by cpumask_size().
35
+
33
+ */
36
+ if (env->virt_enabled) {
34
+static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
37
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
38
+ }
39
+
40
+ return smode(env, csrno);
41
+}
42
+
43
static bool csrind_extensions_present(CPURISCVState *env)
44
{
35
{
45
return riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind;
36
- int ret, i, tmp;
46
@@ -XXX,XX +XXX,XX @@ done:
37
- size_t host_mask_size, target_mask_size;
47
return result;
38
- unsigned long *host_mask;
48
}
39
-
49
40
- /*
50
-static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
41
- * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
51
- target_ulong val)
42
- * arg3 contains the cpu count.
52
+static RISCVException riscv_pmu_write_ctr(CPURISCVState *env, target_ulong val,
43
- */
53
+ uint32_t ctr_idx)
44
- tmp = (8 * sizeof(abi_ulong));
54
{
45
- target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
55
- int ctr_idx = csrno - CSR_MCYCLE;
46
- host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
56
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
47
- ~(sizeof(*host_mask) - 1);
57
uint64_t mhpmctr_val = val;
48
-
58
49
- host_mask = alloca(host_mask_size);
59
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
50
-
60
return RISCV_EXCP_NONE;
51
- ret = target_to_host_cpu_mask(host_mask, host_mask_size,
61
}
52
- arg4, target_mask_size);
62
53
- if (ret != 0) {
63
-static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
54
- return ret;
64
- target_ulong val)
55
- }
65
+static RISCVException riscv_pmu_write_ctrh(CPURISCVState *env, target_ulong val,
56
+ unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
66
+ uint32_t ctr_idx)
57
+ int ret = -TARGET_EFAULT;
67
{
58
68
- int ctr_idx = csrno - CSR_MCYCLEH;
59
- for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
69
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
60
- if (host_mask[i] != 0) {
70
uint64_t mhpmctr_val = counter->mhpmcounter_val;
61
- return 0;
71
uint64_t mhpmctrh_val = val;
62
+ if (p) {
72
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
63
+ ret = -TARGET_EINVAL;
73
return RISCV_EXCP_NONE;
64
+ /*
74
}
65
+ * Since we only care about the empty/non-empty state of the cpumask_t
75
66
+ * not the individual bits, we do not need to repartition the bits
76
+static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
67
+ * from target abi_ulong to host unsigned long.
77
+{
68
+ *
78
+ int ctr_idx = csrno - CSR_MCYCLE;
69
+ * Note that the kernel does not round up cpusetsize to a multiple of
79
+
70
+ * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(),
80
+ return riscv_pmu_write_ctr(env, val, ctr_idx);
71
+ * it copies exactly cpusetsize bytes into a zeroed buffer.
81
+}
72
+ */
82
+
73
+ for (abi_ulong i = 0; i < cpusetsize; ++i) {
83
+static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
74
+ if (p[i]) {
84
+{
75
+ ret = 0;
85
+ int ctr_idx = csrno - CSR_MCYCLEH;
76
+ break;
86
+
77
+ }
87
+ return riscv_pmu_write_ctrh(env, val, ctr_idx);
78
}
88
+}
79
+ unlock_user(p, target_cpus, 0);
89
+
90
RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
91
bool upper_half, uint32_t ctr_idx)
92
{
93
@@ -XXX,XX +XXX,XX @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
94
return riscv_pmu_read_ctr(env, val, true, ctr_index);
95
}
96
97
+static int rmw_cd_mhpmcounter(CPURISCVState *env, int ctr_idx,
98
+ target_ulong *val, target_ulong new_val,
99
+ target_ulong wr_mask)
100
+{
101
+ if (wr_mask != 0 && wr_mask != -1) {
102
+ return -EINVAL;
103
+ }
104
+
105
+ if (!wr_mask && val) {
106
+ riscv_pmu_read_ctr(env, val, false, ctr_idx);
107
+ } else if (wr_mask) {
108
+ riscv_pmu_write_ctr(env, new_val, ctr_idx);
109
+ } else {
110
+ return -EINVAL;
111
+ }
112
+
113
+ return 0;
114
+}
115
+
116
+static int rmw_cd_mhpmcounterh(CPURISCVState *env, int ctr_idx,
117
+ target_ulong *val, target_ulong new_val,
118
+ target_ulong wr_mask)
119
+{
120
+ if (wr_mask != 0 && wr_mask != -1) {
121
+ return -EINVAL;
122
+ }
123
+
124
+ if (!wr_mask && val) {
125
+ riscv_pmu_read_ctr(env, val, true, ctr_idx);
126
+ } else if (wr_mask) {
127
+ riscv_pmu_write_ctrh(env, new_val, ctr_idx);
128
+ } else {
129
+ return -EINVAL;
130
+ }
131
+
132
+ return 0;
133
+}
134
+
135
+static int rmw_cd_mhpmevent(CPURISCVState *env, int evt_index,
136
+ target_ulong *val, target_ulong new_val,
137
+ target_ulong wr_mask)
138
+{
139
+ uint64_t mhpmevt_val = new_val;
140
+
141
+ if (wr_mask != 0 && wr_mask != -1) {
142
+ return -EINVAL;
143
+ }
144
+
145
+ if (!wr_mask && val) {
146
+ *val = env->mhpmevent_val[evt_index];
147
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
148
+ *val &= ~MHPMEVENT_BIT_MINH;
149
+ }
150
+ } else if (wr_mask) {
151
+ wr_mask &= ~MHPMEVENT_BIT_MINH;
152
+ mhpmevt_val = (new_val & wr_mask) |
153
+ (env->mhpmevent_val[evt_index] & ~wr_mask);
154
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
155
+ mhpmevt_val = mhpmevt_val |
156
+ ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
157
+ }
158
+ env->mhpmevent_val[evt_index] = mhpmevt_val;
159
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
160
+ } else {
161
+ return -EINVAL;
162
+ }
163
+
164
+ return 0;
165
+}
166
+
167
+static int rmw_cd_mhpmeventh(CPURISCVState *env, int evt_index,
168
+ target_ulong *val, target_ulong new_val,
169
+ target_ulong wr_mask)
170
+{
171
+ uint64_t mhpmevth_val;
172
+ uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
173
+
174
+ if (wr_mask != 0 && wr_mask != -1) {
175
+ return -EINVAL;
176
+ }
177
+
178
+ if (!wr_mask && val) {
179
+ *val = env->mhpmeventh_val[evt_index];
180
+ if (riscv_cpu_cfg(env)->ext_sscofpmf) {
181
+ *val &= ~MHPMEVENTH_BIT_MINH;
182
+ }
183
+ } else if (wr_mask) {
184
+ wr_mask &= ~MHPMEVENTH_BIT_MINH;
185
+ env->mhpmeventh_val[evt_index] =
186
+ (new_val & wr_mask) | (env->mhpmeventh_val[evt_index] & ~wr_mask);
187
+ mhpmevth_val = env->mhpmeventh_val[evt_index];
188
+ mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
189
+ riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
190
+ } else {
191
+ return -EINVAL;
192
+ }
193
+
194
+ return 0;
195
+}
196
+
197
+static int rmw_cd_ctr_cfg(CPURISCVState *env, int cfg_index, target_ulong *val,
198
+ target_ulong new_val, target_ulong wr_mask)
199
+{
200
+ switch (cfg_index) {
201
+ case 0: /* CYCLECFG */
202
+ if (wr_mask) {
203
+ wr_mask &= ~MCYCLECFG_BIT_MINH;
204
+ env->mcyclecfg = (new_val & wr_mask) | (env->mcyclecfg & ~wr_mask);
205
+ } else {
206
+ *val = env->mcyclecfg &= ~MHPMEVENTH_BIT_MINH;
207
+ }
208
+ break;
209
+ case 2: /* INSTRETCFG */
210
+ if (wr_mask) {
211
+ wr_mask &= ~MINSTRETCFG_BIT_MINH;
212
+ env->minstretcfg = (new_val & wr_mask) |
213
+ (env->minstretcfg & ~wr_mask);
214
+ } else {
215
+ *val = env->minstretcfg &= ~MHPMEVENTH_BIT_MINH;
216
+ }
217
+ break;
218
+ default:
219
+ return -EINVAL;
220
+ }
221
+ return 0;
222
+}
223
+
224
+static int rmw_cd_ctr_cfgh(CPURISCVState *env, int cfg_index, target_ulong *val,
225
+ target_ulong new_val, target_ulong wr_mask)
226
+{
227
+
228
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
229
+ return RISCV_EXCP_ILLEGAL_INST;
230
+ }
231
+
232
+ switch (cfg_index) {
233
+ case 0: /* CYCLECFGH */
234
+ if (wr_mask) {
235
+ wr_mask &= ~MCYCLECFGH_BIT_MINH;
236
+ env->mcyclecfgh = (new_val & wr_mask) |
237
+ (env->mcyclecfgh & ~wr_mask);
238
+ } else {
239
+ *val = env->mcyclecfgh;
240
+ }
241
+ break;
242
+ case 2: /* INSTRETCFGH */
243
+ if (wr_mask) {
244
+ wr_mask &= ~MINSTRETCFGH_BIT_MINH;
245
+ env->minstretcfgh = (new_val & wr_mask) |
246
+ (env->minstretcfgh & ~wr_mask);
247
+ } else {
248
+ *val = env->minstretcfgh;
249
+ }
250
+ break;
251
+ default:
252
+ return -EINVAL;
253
+ }
254
+ return 0;
255
+}
256
+
257
+
258
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
259
target_ulong *val)
260
{
261
@@ -XXX,XX +XXX,XX @@ static RISCVException read_scountovf(CPURISCVState *env, int csrno,
262
target_ulong *mhpm_evt_val;
263
uint64_t of_bit_mask;
264
265
+ /* Virtualize scountovf for counter delegation */
266
+ if (riscv_cpu_cfg(env)->ext_sscofpmf &&
267
+ riscv_cpu_cfg(env)->ext_ssccfg &&
268
+ get_field(env->menvcfg, MENVCFG_CDE) &&
269
+ env->virt_enabled) {
270
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
271
+ }
272
+
273
if (riscv_cpu_mxl(env) == MXL_RV32) {
274
mhpm_evt_val = env->mhpmeventh_val;
275
of_bit_mask = MHPMEVENTH_BIT_OF;
276
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_cd(CPURISCVState *env, int csrno,
277
target_ulong isel, target_ulong *val,
278
target_ulong new_val, target_ulong wr_mask)
279
{
280
- if (!riscv_cpu_cfg(env)->ext_smcdeleg) {
281
- return RISCV_EXCP_ILLEGAL_INST;
282
+ int ret = -EINVAL;
283
+ int ctr_index = isel - ISELECT_CD_FIRST;
284
+ int isel_hpm_start = ISELECT_CD_FIRST + 3;
285
+
286
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg || !riscv_cpu_cfg(env)->ext_ssccfg) {
287
+ ret = RISCV_EXCP_ILLEGAL_INST;
288
+ goto done;
289
}
80
}
290
- /* TODO: Implement the functionality later */
81
- return -TARGET_EINVAL;
291
- return RISCV_EXCP_NONE;
292
+
293
+ /* Invalid siselect value for reserved */
294
+ if (ctr_index == 1) {
295
+ goto done;
296
+ }
297
+
298
+ /* sireg4 and sireg5 provides access RV32 only CSRs */
299
+ if (((csrno == CSR_SIREG5) || (csrno == CSR_SIREG4)) &&
300
+ (riscv_cpu_mxl(env) != MXL_RV32)) {
301
+ ret = RISCV_EXCP_ILLEGAL_INST;
302
+ goto done;
303
+ }
304
+
305
+ /* Check Sscofpmf dependancy */
306
+ if (!riscv_cpu_cfg(env)->ext_sscofpmf && csrno == CSR_SIREG5 &&
307
+ (isel_hpm_start <= isel && isel <= ISELECT_CD_LAST)) {
308
+ goto done;
309
+ }
310
+
311
+ /* Check smcntrpmf dependancy */
312
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf &&
313
+ (csrno == CSR_SIREG2 || csrno == CSR_SIREG5) &&
314
+ (ISELECT_CD_FIRST <= isel && isel < isel_hpm_start)) {
315
+ goto done;
316
+ }
317
+
318
+ if (!get_field(env->mcounteren, BIT(ctr_index)) ||
319
+ !get_field(env->menvcfg, MENVCFG_CDE)) {
320
+ goto done;
321
+ }
322
+
323
+ switch (csrno) {
324
+ case CSR_SIREG:
325
+ ret = rmw_cd_mhpmcounter(env, ctr_index, val, new_val, wr_mask);
326
+ break;
327
+ case CSR_SIREG4:
328
+ ret = rmw_cd_mhpmcounterh(env, ctr_index, val, new_val, wr_mask);
329
+ break;
330
+ case CSR_SIREG2:
331
+ if (ctr_index <= 2) {
332
+ ret = rmw_cd_ctr_cfg(env, ctr_index, val, new_val, wr_mask);
333
+ } else {
334
+ ret = rmw_cd_mhpmevent(env, ctr_index, val, new_val, wr_mask);
335
+ }
336
+ break;
337
+ case CSR_SIREG5:
338
+ if (ctr_index <= 2) {
339
+ ret = rmw_cd_ctr_cfgh(env, ctr_index, val, new_val, wr_mask);
340
+ } else {
341
+ ret = rmw_cd_mhpmeventh(env, ctr_index, val, new_val, wr_mask);
342
+ }
343
+ break;
344
+ default:
345
+ goto done;
346
+ }
347
+
348
+done:
349
+ return ret;
82
+ return ret;
350
}
83
}
351
84
352
/*
85
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
353
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
86
@@ -XXX,XX +XXX,XX @@ static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
354
return RISCV_EXCP_NONE;
87
355
}
88
/* check cpu_set */
356
89
if (arg3 != 0) {
357
+static RISCVException read_scountinhibit(CPURISCVState *env, int csrno,
90
- ret = cpu_set_valid(arg3, arg4);
358
+ target_ulong *val)
91
+ ret = nonempty_cpu_set(arg3, arg4);
359
+{
92
if (ret != 0) {
360
+ /* S-mode can only access the bits delegated by M-mode */
93
return ret;
361
+ *val = env->mcountinhibit & env->mcounteren;
94
}
362
+ return RISCV_EXCP_NONE;
363
+}
364
+
365
+static RISCVException write_scountinhibit(CPURISCVState *env, int csrno,
366
+ target_ulong val)
367
+{
368
+ write_mcountinhibit(env, csrno, val & env->mcounteren);
369
+ return RISCV_EXCP_NONE;
370
+}
371
+
372
static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
373
target_ulong *val)
374
{
375
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
376
target_ulong val)
377
{
378
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
379
- uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
380
+ uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE |
381
+ MENVCFG_CBZE | MENVCFG_CDE;
382
383
if (riscv_cpu_mxl(env) == MXL_RV64) {
384
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
385
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
386
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
387
(cfg->ext_svadu ? MENVCFG_ADUE : 0);
388
389
if (env_archcpu(env)->cfg.ext_zicfilp) {
390
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
391
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
392
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
393
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
394
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
395
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
396
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0);
397
uint64_t valh = (uint64_t)val << 32;
398
399
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
400
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
401
[CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
402
.min_priv_ver = PRIV_VERSION_1_12_0 },
403
404
+ /* Supervisor Counter Delegation */
405
+ [CSR_SCOUNTINHIBIT] = {"scountinhibit", scountinhibit_pred,
406
+ read_scountinhibit, write_scountinhibit,
407
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
408
+
409
/* Supervisor Trap Setup */
410
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
411
NULL, read_sstatus_i128 },
412
--
95
--
413
2.47.1
96
2.48.1
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Yu-Ming Chang <yumin686@andestech.com>
2
2
3
Do a cosmetic change in riscv_raise_exception() to change 'exception'
3
For privilege version 1.12 or newer, C always implies Zca. We can only
4
type from uint32_t to RISCVException, making it a bit clear that the
4
check ext_zca to allow 16-bit aligned PC addresses. For older privilege
5
arg is directly correlated to the RISCVException enum.
5
versions, we only check C.
6
6
7
As a side effect, change 'excp' type from int to RISCVException in
7
Signed-off-by: Yu-Ming Chang <yumin686@andestech.com>
8
generate_exception() to guarantee that all callers of
9
riscv_raise_exception() will use the enum.
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Message-ID: <174184718265.10540.10120024221661781046-0@git.sr.ht>
14
Message-ID: <20250106173734.412353-2-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
11
---
17
target/riscv/cpu.h | 3 ++-
12
target/riscv/cpu.h | 12 ++++++++++++
18
target/riscv/op_helper.c | 3 ++-
13
target/riscv/op_helper.c | 8 ++++++--
19
target/riscv/translate.c | 2 +-
14
target/riscv/translate.c | 4 +++-
20
3 files changed, 5 insertions(+), 3 deletions(-)
15
target/riscv/insn_trans/trans_rvi.c.inc | 8 ++++++--
16
4 files changed, 27 insertions(+), 5 deletions(-)
21
17
22
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
18
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
23
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu.h
20
--- a/target/riscv/cpu.h
25
+++ b/target/riscv/cpu.h
21
+++ b/target/riscv/cpu.h
26
@@ -XXX,XX +XXX,XX @@ void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
22
@@ -XXX,XX +XXX,XX @@ static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env)
27
int *max_insns, vaddr pc, void *host_pc);
23
}
28
24
#endif
29
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
25
30
- uint32_t exception, uintptr_t pc);
26
+static inline bool riscv_cpu_allow_16bit_insn(const RISCVCPUConfig *cfg,
31
+ RISCVException exception,
27
+ target_long priv_ver,
32
+ uintptr_t pc);
28
+ uint32_t misa_ext)
33
29
+{
34
target_ulong riscv_cpu_get_fflags(CPURISCVState *env);
30
+ /* In priv spec version 1.12 or newer, C always implies Zca */
35
void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong);
31
+ if (priv_ver >= PRIV_VERSION_1_12_0) {
32
+ return cfg->ext_zca;
33
+ } else {
34
+ return misa_ext & RVC;
35
+ }
36
+}
37
+
38
/*
39
* Encode LMUL to lmul as follows:
40
* LMUL vlmul lmul
36
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
41
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
37
index XXXXXXX..XXXXXXX 100644
42
index XXXXXXX..XXXXXXX 100644
38
--- a/target/riscv/op_helper.c
43
--- a/target/riscv/op_helper.c
39
+++ b/target/riscv/op_helper.c
44
+++ b/target/riscv/op_helper.c
40
@@ -XXX,XX +XXX,XX @@
45
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
41
46
}
42
/* Exceptions processing helpers */
47
43
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
48
target_ulong retpc = env->sepc;
44
- uint32_t exception, uintptr_t pc)
49
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
45
+ RISCVException exception,
50
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
46
+ uintptr_t pc)
51
+ env->priv_ver,
47
{
52
+ env->misa_ext) && (retpc & 0x3)) {
48
CPUState *cs = env_cpu(env);
53
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
49
cs->exception_index = exception;
54
}
55
56
@@ -XXX,XX +XXX,XX @@ static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
57
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
58
}
59
60
- if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
61
+ if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg,
62
+ env->priv_ver,
63
+ env->misa_ext) && (retpc & 0x3)) {
64
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
65
}
66
50
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
67
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
51
index XXXXXXX..XXXXXXX 100644
68
index XXXXXXX..XXXXXXX 100644
52
--- a/target/riscv/translate.c
69
--- a/target/riscv/translate.c
53
+++ b/target/riscv/translate.c
70
+++ b/target/riscv/translate.c
54
@@ -XXX,XX +XXX,XX @@ static void gen_update_pc(DisasContext *ctx, target_long diff)
71
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
55
ctx->pc_save = ctx->base.pc_next + diff;
72
TCGv succ_pc = dest_gpr(ctx, rd);
56
}
73
57
74
/* check misaligned: */
58
-static void generate_exception(DisasContext *ctx, int excp)
75
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
59
+static void generate_exception(DisasContext *ctx, RISCVException excp)
76
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
60
{
77
+ ctx->priv_ver,
61
gen_update_pc(ctx, 0);
78
+ ctx->misa_ext)) {
62
gen_helper_raise_exception(tcg_env, tcg_constant_i32(excp));
79
if ((imm & 0x3) != 0) {
80
TCGv target_pc = tcg_temp_new();
81
gen_pc_plus_diff(target_pc, ctx, imm);
82
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/riscv/insn_trans/trans_rvi.c.inc
85
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
86
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
87
tcg_gen_ext32s_tl(target_pc, target_pc);
88
}
89
90
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca) {
91
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
92
+ ctx->priv_ver,
93
+ ctx->misa_ext)) {
94
TCGv t0 = tcg_temp_new();
95
96
misaligned = gen_new_label();
97
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
98
99
gen_set_label(l); /* branch taken */
100
101
- if (!has_ext(ctx, RVC) && !ctx->cfg_ptr->ext_zca &&
102
+ if (!riscv_cpu_allow_16bit_insn(ctx->cfg_ptr,
103
+ ctx->priv_ver,
104
+ ctx->misa_ext) &&
105
(a->imm & 0x3)) {
106
/* misaligned */
107
TCGv target_pc = tcg_temp_new();
63
--
108
--
64
2.47.1
109
2.48.1
65
66
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
When using system mode we can get the CPU traps being taken via the
4
'riscv_trap' trace or the "-d int" qemu log. User mode does not a way of
5
logging/showing exceptions to users.
6
7
Add a trace in riscv_raise_exception() to allow qemu-riscv(32/64) users
8
to check all exceptions being thrown. This is particularly useful to
9
help identifying insns that are throwing SIGILLs.
10
11
As it is today we need to debug their binaries to identify where the
12
illegal insns are:
13
14
$ ~/work/qemu/build/qemu-riscv64 -cpu rv64 ./foo.out
15
Illegal instruction (core dumped)
16
17
After this change users can capture the trace and use EPC to pinpoint
18
the insn:
19
20
$ ~/work/qemu/build/qemu-riscv64 -cpu rv64 -trace riscv_exception ./foo.out
21
riscv_exception 8 (user_ecall) on epc 0x17cd2
22
riscv_exception 8 (user_ecall) on epc 0x17cda
23
riscv_exception 8 (user_ecall) on epc 0x17622
24
(...)
25
riscv_exception 2 (illegal_instruction) on epc 0x1053a
26
Illegal instruction (core dumped)
27
28
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
29
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
30
Message-ID: <20250106173734.412353-3-dbarboza@ventanamicro.com>
31
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
32
---
33
target/riscv/op_helper.c | 6 ++++++
34
target/riscv/trace-events | 3 +++
35
2 files changed, 9 insertions(+)
36
37
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/riscv/op_helper.c
40
+++ b/target/riscv/op_helper.c
41
@@ -XXX,XX +XXX,XX @@
42
#include "exec/exec-all.h"
43
#include "exec/cpu_ldst.h"
44
#include "exec/helper-proto.h"
45
+#include "trace.h"
46
47
/* Exceptions processing helpers */
48
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
49
@@ -XXX,XX +XXX,XX @@ G_NORETURN void riscv_raise_exception(CPURISCVState *env,
50
uintptr_t pc)
51
{
52
CPUState *cs = env_cpu(env);
53
+
54
+ trace_riscv_exception(exception,
55
+ riscv_cpu_get_trap_name(exception, false),
56
+ env->pc);
57
+
58
cs->exception_index = exception;
59
cpu_loop_exit_restore(cs, pc);
60
}
61
diff --git a/target/riscv/trace-events b/target/riscv/trace-events
62
index XXXXXXX..XXXXXXX 100644
63
--- a/target/riscv/trace-events
64
+++ b/target/riscv/trace-events
65
@@ -XXX,XX +XXX,XX @@ pmpaddr_csr_write(uint64_t mhartid, uint32_t addr_index, uint64_t val) "hart %"
66
67
mseccfg_csr_read(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": read mseccfg, val: 0x%" PRIx64
68
mseccfg_csr_write(uint64_t mhartid, uint64_t val) "hart %" PRIu64 ": write mseccfg, val: 0x%" PRIx64
69
+
70
+# op_helper.c
71
+riscv_exception(uint32_t exception, const char *desc, uint64_t epc) "%u (%s) on epc 0x%"PRIx64""
72
--
73
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-ID: <20250106102346.1100149-3-baturo.alexey@gmail.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/cpu.h | 8 ++++++++
10
target/riscv/cpu_bits.h | 4 ++++
11
target/riscv/cpu_cfg.h | 3 +++
12
target/riscv/pmp.h | 1 +
13
target/riscv/csr.c | 33 +++++++++++++++++++++++++++++++--
14
target/riscv/pmp.c | 14 +++++++++++---
15
6 files changed, 58 insertions(+), 5 deletions(-)
16
17
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu.h
20
+++ b/target/riscv/cpu.h
21
@@ -XXX,XX +XXX,XX @@ typedef enum {
22
EXT_STATUS_DIRTY,
23
} RISCVExtStatus;
24
25
+/* Enum holds PMM field values for Zjpm v1.0 extension */
26
+typedef enum {
27
+ PMM_FIELD_DISABLED = 0,
28
+ PMM_FIELD_RESERVED = 1,
29
+ PMM_FIELD_PMLEN7 = 2,
30
+ PMM_FIELD_PMLEN16 = 3,
31
+} RISCVPmPmm;
32
+
33
typedef struct riscv_cpu_implied_exts_rule {
34
#ifndef CONFIG_USER_ONLY
35
/*
36
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/riscv/cpu_bits.h
39
+++ b/target/riscv/cpu_bits.h
40
@@ -XXX,XX +XXX,XX @@ typedef enum {
41
#define HSTATUS_VTSR 0x00400000
42
#define HSTATUS_HUKTE 0x01000000
43
#define HSTATUS_VSXL 0x300000000
44
+#define HSTATUS_HUPMM 0x3000000000000
45
46
#define HSTATUS32_WPRI 0xFF8FF87E
47
#define HSTATUS64_WPRI 0xFFFFFFFFFF8FF87EULL
48
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
49
#define MENVCFG_CBIE (3UL << 4)
50
#define MENVCFG_CBCFE BIT(6)
51
#define MENVCFG_CBZE BIT(7)
52
+#define MENVCFG_PMM (3ULL << 32)
53
#define MENVCFG_ADUE (1ULL << 61)
54
#define MENVCFG_PBMTE (1ULL << 62)
55
#define MENVCFG_STCE (1ULL << 63)
56
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
57
#define SENVCFG_CBCFE MENVCFG_CBCFE
58
#define SENVCFG_CBZE MENVCFG_CBZE
59
#define SENVCFG_UKTE BIT(8)
60
+#define SENVCFG_PMM MENVCFG_PMM
61
62
#define HENVCFG_FIOM MENVCFG_FIOM
63
#define HENVCFG_LPE MENVCFG_LPE
64
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
65
#define HENVCFG_CBIE MENVCFG_CBIE
66
#define HENVCFG_CBCFE MENVCFG_CBCFE
67
#define HENVCFG_CBZE MENVCFG_CBZE
68
+#define HENVCFG_PMM MENVCFG_PMM
69
#define HENVCFG_ADUE MENVCFG_ADUE
70
#define HENVCFG_PBMTE MENVCFG_PBMTE
71
#define HENVCFG_STCE MENVCFG_STCE
72
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
73
index XXXXXXX..XXXXXXX 100644
74
--- a/target/riscv/cpu_cfg.h
75
+++ b/target/riscv/cpu_cfg.h
76
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
77
bool ext_ssaia;
78
bool ext_sscofpmf;
79
bool ext_smepmp;
80
+ bool ext_ssnpm;
81
+ bool ext_smnpm;
82
+ bool ext_smmpm;
83
bool rvv_ta_all_1s;
84
bool rvv_ma_all_1s;
85
bool rvv_vl_half_avl;
86
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/riscv/pmp.h
89
+++ b/target/riscv/pmp.h
90
@@ -XXX,XX +XXX,XX @@ typedef enum {
91
MSECCFG_USEED = 1 << 8,
92
MSECCFG_SSEED = 1 << 9,
93
MSECCFG_MLPE = 1 << 10,
94
+ MSECCFG_PMM = 3ULL << 32,
95
} mseccfg_field_t;
96
97
typedef struct {
98
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/csr.c
101
+++ b/target/riscv/csr.c
102
@@ -XXX,XX +XXX,XX @@ static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
103
if (riscv_cpu_cfg(env)->ext_zkr) {
104
return RISCV_EXCP_NONE;
105
}
106
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
107
+ return RISCV_EXCP_NONE;
108
+ }
109
110
return RISCV_EXCP_ILLEGAL_INST;
111
}
112
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
113
if (env_archcpu(env)->cfg.ext_zicfiss) {
114
mask |= MENVCFG_SSE;
115
}
116
+
117
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
118
+ if (env_archcpu(env)->cfg.ext_smnpm &&
119
+ get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
120
+ mask |= MENVCFG_PMM;
121
+ }
122
}
123
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
124
125
@@ -XXX,XX +XXX,XX @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
126
{
127
uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
128
RISCVException ret;
129
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
130
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
131
+ riscv_cpu_mxl(env) == MXL_RV64 &&
132
+ get_field(val, SENVCFG_PMM) != PMM_FIELD_RESERVED) {
133
+ mask |= SENVCFG_PMM;
134
+ }
135
136
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
137
if (ret != RISCV_EXCP_NONE) {
138
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
139
get_field(env->menvcfg, MENVCFG_SSE)) {
140
mask |= HENVCFG_SSE;
141
}
142
+
143
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
144
+ if (env_archcpu(env)->cfg.ext_ssnpm &&
145
+ get_field(val, HENVCFG_PMM) != PMM_FIELD_RESERVED) {
146
+ mask |= HENVCFG_PMM;
147
+ }
148
}
149
150
env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
151
@@ -XXX,XX +XXX,XX @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno,
152
static RISCVException write_hstatus(CPURISCVState *env, int csrno,
153
target_ulong val)
154
{
155
+ uint64_t mask = (target_ulong)-1;
156
if (!env_archcpu(env)->cfg.ext_svukte) {
157
- val = val & (~HSTATUS_HUKTE);
158
+ mask &= ~HSTATUS_HUKTE;
159
}
160
- env->hstatus = val;
161
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
162
+ if (!env_archcpu(env)->cfg.ext_ssnpm ||
163
+ riscv_cpu_mxl(env) != MXL_RV64 ||
164
+ get_field(val, HSTATUS_HUPMM) == PMM_FIELD_RESERVED) {
165
+ mask &= ~HSTATUS_HUPMM;
166
+ }
167
+ env->hstatus = (env->hstatus & ~mask) | (val & mask);
168
+
169
if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
170
qemu_log_mask(LOG_UNIMP,
171
"QEMU does not support mixed HSXLEN options.");
172
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
173
index XXXXXXX..XXXXXXX 100644
174
--- a/target/riscv/pmp.c
175
+++ b/target/riscv/pmp.c
176
@@ -XXX,XX +XXX,XX @@ target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
177
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
178
{
179
int i;
180
+ uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
181
+ /* Update PMM field only if the value is valid according to Zjpm v1.0 */
182
+ if (riscv_cpu_cfg(env)->ext_smmpm &&
183
+ riscv_cpu_mxl(env) == MXL_RV64 &&
184
+ get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
185
+ mask |= MSECCFG_PMM;
186
+ }
187
188
trace_mseccfg_csr_write(env->mhartid, val);
189
190
@@ -XXX,XX +XXX,XX @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
191
192
if (riscv_cpu_cfg(env)->ext_smepmp) {
193
/* Sticky bits */
194
- val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
195
- if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
196
+ val |= (env->mseccfg & mask);
197
+ if ((val ^ env->mseccfg) & mask) {
198
tlb_flush(env_cpu(env));
199
}
200
} else {
201
- val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
202
+ mask |= MSECCFG_RLB;
203
+ val &= ~(mask);
204
}
205
206
/* M-mode forward cfi to be enabled if cfi extension is implemented */
207
--
208
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-ID: <20250106102346.1100149-4-baturo.alexey@gmail.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/cpu.h | 5 +++
10
target/riscv/cpu_helper.c | 78 +++++++++++++++++++++++++++++++++++++++
11
2 files changed, 83 insertions(+)
12
13
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/cpu.h
16
+++ b/target/riscv/cpu.h
17
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
18
19
bool riscv_cpu_is_32bit(RISCVCPU *cpu);
20
21
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
22
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
23
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
24
+
25
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
26
target_ulong *ret_value);
27
+
28
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
29
target_ulong *ret_value,
30
target_ulong new_value, target_ulong write_mask);
31
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu_helper.c
34
+++ b/target/riscv/cpu_helper.c
35
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
36
*pflags = flags;
37
}
38
39
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
40
+{
41
+#ifndef CONFIG_USER_ONLY
42
+ int priv_mode = cpu_address_mode(env);
43
+
44
+ if (get_field(env->mstatus, MSTATUS_MPRV) &&
45
+ get_field(env->mstatus, MSTATUS_MXR)) {
46
+ return PMM_FIELD_DISABLED;
47
+ }
48
+
49
+ /* Get current PMM field */
50
+ switch (priv_mode) {
51
+ case PRV_M:
52
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
53
+ return get_field(env->mseccfg, MSECCFG_PMM);
54
+ }
55
+ break;
56
+ case PRV_S:
57
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
58
+ if (get_field(env->mstatus, MSTATUS_MPV)) {
59
+ return get_field(env->henvcfg, HENVCFG_PMM);
60
+ } else {
61
+ return get_field(env->menvcfg, MENVCFG_PMM);
62
+ }
63
+ }
64
+ break;
65
+ case PRV_U:
66
+ if (riscv_has_ext(env, RVS)) {
67
+ if (riscv_cpu_cfg(env)->ext_ssnpm) {
68
+ return get_field(env->senvcfg, SENVCFG_PMM);
69
+ }
70
+ } else {
71
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
72
+ return get_field(env->menvcfg, MENVCFG_PMM);
73
+ }
74
+ }
75
+ break;
76
+ default:
77
+ g_assert_not_reached();
78
+ }
79
+ return PMM_FIELD_DISABLED;
80
+#else
81
+ return PMM_FIELD_DISABLED;
82
+#endif
83
+}
84
+
85
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
86
+{
87
+#ifndef CONFIG_USER_ONLY
88
+ int satp_mode = 0;
89
+ int priv_mode = cpu_address_mode(env);
90
+
91
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
92
+ satp_mode = get_field(env->satp, SATP32_MODE);
93
+ } else {
94
+ satp_mode = get_field(env->satp, SATP64_MODE);
95
+ }
96
+
97
+ return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
98
+#else
99
+ return false;
100
+#endif
101
+}
102
+
103
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
104
+{
105
+ switch (pmm) {
106
+ case PMM_FIELD_DISABLED:
107
+ return 0;
108
+ case PMM_FIELD_PMLEN7:
109
+ return 7;
110
+ case PMM_FIELD_PMLEN16:
111
+ return 16;
112
+ default:
113
+ g_assert_not_reached();
114
+ }
115
+}
116
+
117
#ifndef CONFIG_USER_ONLY
118
119
/*
120
--
121
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
7
Message-ID: <20250106102346.1100149-5-baturo.alexey@gmail.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/cpu.h | 3 +++
11
target/riscv/cpu_helper.c | 3 +++
12
target/riscv/translate.c | 5 +++++
13
3 files changed, 11 insertions(+)
14
15
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.h
18
+++ b/target/riscv/cpu.h
19
@@ -XXX,XX +XXX,XX @@ FIELD(TB_FLAGS, FCFI_ENABLED, 26, 1)
20
FIELD(TB_FLAGS, FCFI_LP_EXPECTED, 27, 1)
21
/* zicfiss needs a TB flag so that correct TB is located based on tb flags */
22
FIELD(TB_FLAGS, BCFI_ENABLED, 28, 1)
23
+/* If pointer masking should be applied and address sign extended */
24
+FIELD(TB_FLAGS, PM_PMM, 29, 2)
25
+FIELD(TB_FLAGS, PM_SIGNEXTEND, 31, 1)
26
27
#ifdef TARGET_RISCV32
28
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
29
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu_helper.c
32
+++ b/target/riscv/cpu_helper.c
33
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
34
RISCVCPU *cpu = env_archcpu(env);
35
RISCVExtStatus fs, vs;
36
uint32_t flags = 0;
37
+ bool pm_signext = riscv_cpu_virt_mem_enabled(env);
38
39
*pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
40
*cs_base = 0;
41
@@ -XXX,XX +XXX,XX @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
42
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
43
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
44
flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
45
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_PMM, riscv_pm_get_pmm(env));
46
+ flags = FIELD_DP32(flags, TB_FLAGS, PM_SIGNEXTEND, pm_signext);
47
48
*pflags = flags;
49
}
50
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
51
index XXXXXXX..XXXXXXX 100644
52
--- a/target/riscv/translate.c
53
+++ b/target/riscv/translate.c
54
@@ -XXX,XX +XXX,XX @@ typedef struct DisasContext {
55
bool vl_eq_vlmax;
56
CPUState *cs;
57
TCGv zero;
58
+ /* actual address width */
59
+ uint8_t addr_xl;
60
+ bool addr_signed;
61
/* Ztso */
62
bool ztso;
63
/* Use icount trigger for native debug */
64
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
65
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
66
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
67
ctx->cs = cs;
68
+ ctx->addr_xl = 0;
69
+ ctx->addr_signed = false;
70
ctx->ztso = cpu->cfg.ext_ztso;
71
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
72
ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
73
--
74
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-ID: <20250106102346.1100149-6-baturo.alexey@gmail.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/translate.c | 22 ++++++++++++++++------
10
target/riscv/vector_helper.c | 16 ++++++++++++++++
11
2 files changed, 32 insertions(+), 6 deletions(-)
12
13
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/translate.c
16
+++ b/target/riscv/translate.c
17
@@ -XXX,XX +XXX,XX @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
18
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
19
20
tcg_gen_addi_tl(addr, src1, imm);
21
- if (get_address_xl(ctx) == MXL_RV32) {
22
- tcg_gen_ext32u_tl(addr, addr);
23
+ if (ctx->addr_signed) {
24
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
25
+ } else {
26
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
27
}
28
29
return addr;
30
@@ -XXX,XX +XXX,XX @@ static TCGv get_address_indexed(DisasContext *ctx, int rs1, TCGv offs)
31
TCGv src1 = get_gpr(ctx, rs1, EXT_NONE);
32
33
tcg_gen_add_tl(addr, src1, offs);
34
- if (get_xl(ctx) == MXL_RV32) {
35
- tcg_gen_ext32u_tl(addr, addr);
36
+ if (ctx->addr_signed) {
37
+ tcg_gen_sextract_tl(addr, addr, 0, ctx->addr_xl);
38
+ } else {
39
+ tcg_gen_extract_tl(addr, addr, 0, ctx->addr_xl);
40
}
41
42
return addr;
43
@@ -XXX,XX +XXX,XX @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
44
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
45
ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
46
ctx->cs = cs;
47
- ctx->addr_xl = 0;
48
- ctx->addr_signed = false;
49
+ if (get_xl(ctx) == MXL_RV32) {
50
+ ctx->addr_xl = 32;
51
+ ctx->addr_signed = false;
52
+ } else {
53
+ int pm_pmm = FIELD_EX32(tb_flags, TB_FLAGS, PM_PMM);
54
+ ctx->addr_xl = 64 - riscv_pm_get_pmlen(pm_pmm);
55
+ ctx->addr_signed = FIELD_EX32(tb_flags, TB_FLAGS, PM_SIGNEXTEND);
56
+ }
57
ctx->ztso = cpu->cfg.ext_ztso;
58
ctx->itrigger = FIELD_EX32(tb_flags, TB_FLAGS, ITRIGGER);
59
ctx->bcfi_enabled = FIELD_EX32(tb_flags, TB_FLAGS, BCFI_ENABLED);
60
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
61
index XXXXXXX..XXXXXXX 100644
62
--- a/target/riscv/vector_helper.c
63
+++ b/target/riscv/vector_helper.c
64
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
65
66
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
67
{
68
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
69
+ return addr;
70
+ }
71
+ RISCVPmPmm pmm = riscv_pm_get_pmm(env);
72
+ if (pmm == PMM_FIELD_DISABLED) {
73
+ return addr;
74
+ }
75
+ int pmlen = riscv_pm_get_pmlen(pmm);
76
+ bool signext = riscv_cpu_virt_mem_enabled(env);
77
+ addr = addr << pmlen;
78
+ /* sign/zero extend masked address by N-1 bit */
79
+ if (signext) {
80
+ addr = (target_long)addr >> pmlen;
81
+ } else {
82
+ addr = addr >> pmlen;
83
+ }
84
return addr;
85
}
86
87
--
88
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
5
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
6
Message-ID: <20250106102346.1100149-7-baturo.alexey@gmail.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
target/riscv/cpu.h | 1 +
10
target/riscv/internals.h | 54 ++++++++++++++++++++++++++++++++++++
11
target/riscv/cpu_helper.c | 19 +++++++++++++
12
target/riscv/op_helper.c | 16 +++++------
13
target/riscv/vector_helper.c | 21 --------------
14
5 files changed, 82 insertions(+), 29 deletions(-)
15
16
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu.h
19
+++ b/target/riscv/cpu.h
20
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_is_32bit(RISCVCPU *cpu);
21
22
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env);
23
RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env);
24
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env);
25
uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm);
26
27
RISCVException riscv_csrr(CPURISCVState *env, int csrno,
28
diff --git a/target/riscv/internals.h b/target/riscv/internals.h
29
index XXXXXXX..XXXXXXX 100644
30
--- a/target/riscv/internals.h
31
+++ b/target/riscv/internals.h
32
@@ -XXX,XX +XXX,XX @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
33
/* Our implementation of CPUClass::has_work */
34
bool riscv_cpu_has_work(CPUState *cs);
35
36
+/* Zjpm addr masking routine */
37
+static inline target_ulong adjust_addr_body(CPURISCVState *env,
38
+ target_ulong addr,
39
+ bool is_virt_addr)
40
+{
41
+ RISCVPmPmm pmm = PMM_FIELD_DISABLED;
42
+ uint32_t pmlen = 0;
43
+ bool signext = false;
44
+
45
+ /* do nothing for rv32 mode */
46
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
47
+ return addr;
48
+ }
49
+
50
+ /* get pmm field depending on whether addr is */
51
+ if (is_virt_addr) {
52
+ pmm = riscv_pm_get_virt_pmm(env);
53
+ } else {
54
+ pmm = riscv_pm_get_pmm(env);
55
+ }
56
+
57
+ /* if pointer masking is disabled, return original addr */
58
+ if (pmm == PMM_FIELD_DISABLED) {
59
+ return addr;
60
+ }
61
+
62
+ if (!is_virt_addr) {
63
+ signext = riscv_cpu_virt_mem_enabled(env);
64
+ }
65
+ addr = addr << pmlen;
66
+ pmlen = riscv_pm_get_pmlen(pmm);
67
+
68
+ /* sign/zero extend masked address by N-1 bit */
69
+ if (signext) {
70
+ addr = (target_long)addr >> pmlen;
71
+ } else {
72
+ addr = addr >> pmlen;
73
+ }
74
+
75
+ return addr;
76
+}
77
+
78
+static inline target_ulong adjust_addr(CPURISCVState *env,
79
+ target_ulong addr)
80
+{
81
+ return adjust_addr_body(env, addr, false);
82
+}
83
+
84
+static inline target_ulong adjust_addr_virt(CPURISCVState *env,
85
+ target_ulong addr)
86
+{
87
+ return adjust_addr_body(env, addr, true);
88
+}
89
+
90
#endif
91
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
92
index XXXXXXX..XXXXXXX 100644
93
--- a/target/riscv/cpu_helper.c
94
+++ b/target/riscv/cpu_helper.c
95
@@ -XXX,XX +XXX,XX @@ RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
96
#endif
97
}
98
99
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
100
+{
101
+#ifndef CONFIG_USER_ONLY
102
+ int priv_mode = cpu_address_mode(env);
103
+
104
+ if (priv_mode == PRV_U) {
105
+ return get_field(env->hstatus, HSTATUS_HUPMM);
106
+ } else {
107
+ if (get_field(env->hstatus, HSTATUS_SPVP)) {
108
+ return get_field(env->henvcfg, HENVCFG_PMM);
109
+ } else {
110
+ return get_field(env->senvcfg, SENVCFG_PMM);
111
+ }
112
+ }
113
+#else
114
+ return PMM_FIELD_DISABLED;
115
+#endif
116
+}
117
+
118
bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
119
{
120
#ifndef CONFIG_USER_ONLY
121
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
122
index XXXXXXX..XXXXXXX 100644
123
--- a/target/riscv/op_helper.c
124
+++ b/target/riscv/op_helper.c
125
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_hlv_bu(CPURISCVState *env, target_ulong addr)
126
int mmu_idx = check_access_hlsv(env, false, ra);
127
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
128
129
- return cpu_ldb_mmu(env, addr, oi, ra);
130
+ return cpu_ldb_mmu(env, adjust_addr_virt(env, addr), oi, ra);
131
}
132
133
target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
134
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_hlv_hu(CPURISCVState *env, target_ulong addr)
135
int mmu_idx = check_access_hlsv(env, false, ra);
136
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
137
138
- return cpu_ldw_mmu(env, addr, oi, ra);
139
+ return cpu_ldw_mmu(env, adjust_addr_virt(env, addr), oi, ra);
140
}
141
142
target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
143
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_hlv_wu(CPURISCVState *env, target_ulong addr)
144
int mmu_idx = check_access_hlsv(env, false, ra);
145
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
146
147
- return cpu_ldl_mmu(env, addr, oi, ra);
148
+ return cpu_ldl_mmu(env, adjust_addr_virt(env, addr), oi, ra);
149
}
150
151
target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
152
@@ -XXX,XX +XXX,XX @@ target_ulong helper_hyp_hlv_d(CPURISCVState *env, target_ulong addr)
153
int mmu_idx = check_access_hlsv(env, false, ra);
154
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
155
156
- return cpu_ldq_mmu(env, addr, oi, ra);
157
+ return cpu_ldq_mmu(env, adjust_addr_virt(env, addr), oi, ra);
158
}
159
160
void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
161
@@ -XXX,XX +XXX,XX @@ void helper_hyp_hsv_b(CPURISCVState *env, target_ulong addr, target_ulong val)
162
int mmu_idx = check_access_hlsv(env, false, ra);
163
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
164
165
- cpu_stb_mmu(env, addr, val, oi, ra);
166
+ cpu_stb_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
167
}
168
169
void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
170
@@ -XXX,XX +XXX,XX @@ void helper_hyp_hsv_h(CPURISCVState *env, target_ulong addr, target_ulong val)
171
int mmu_idx = check_access_hlsv(env, false, ra);
172
MemOpIdx oi = make_memop_idx(MO_TEUW, mmu_idx);
173
174
- cpu_stw_mmu(env, addr, val, oi, ra);
175
+ cpu_stw_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
176
}
177
178
void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
179
@@ -XXX,XX +XXX,XX @@ void helper_hyp_hsv_w(CPURISCVState *env, target_ulong addr, target_ulong val)
180
int mmu_idx = check_access_hlsv(env, false, ra);
181
MemOpIdx oi = make_memop_idx(MO_TEUL, mmu_idx);
182
183
- cpu_stl_mmu(env, addr, val, oi, ra);
184
+ cpu_stl_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
185
}
186
187
void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
188
@@ -XXX,XX +XXX,XX @@ void helper_hyp_hsv_d(CPURISCVState *env, target_ulong addr, target_ulong val)
189
int mmu_idx = check_access_hlsv(env, false, ra);
190
MemOpIdx oi = make_memop_idx(MO_TEUQ, mmu_idx);
191
192
- cpu_stq_mmu(env, addr, val, oi, ra);
193
+ cpu_stq_mmu(env, adjust_addr_virt(env, addr), val, oi, ra);
194
}
195
196
/*
197
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
198
index XXXXXXX..XXXXXXX 100644
199
--- a/target/riscv/vector_helper.c
200
+++ b/target/riscv/vector_helper.c
201
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
202
return scale < 0 ? vlenb >> -scale : vlenb << scale;
203
}
204
205
-static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
206
-{
207
- if (riscv_cpu_mxl(env) == MXL_RV32) {
208
- return addr;
209
- }
210
- RISCVPmPmm pmm = riscv_pm_get_pmm(env);
211
- if (pmm == PMM_FIELD_DISABLED) {
212
- return addr;
213
- }
214
- int pmlen = riscv_pm_get_pmlen(pmm);
215
- bool signext = riscv_cpu_virt_mem_enabled(env);
216
- addr = addr << pmlen;
217
- /* sign/zero extend masked address by N-1 bit */
218
- if (signext) {
219
- addr = (target_long)addr >> pmlen;
220
- } else {
221
- addr = addr >> pmlen;
222
- }
223
- return addr;
224
-}
225
-
226
/*
227
* This function checks watchpoint before real load operation.
228
*
229
--
230
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Message-ID: <20250106102346.1100149-8-baturo.alexey@gmail.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
8
target/riscv/cpu.c | 6 ++++++
9
1 file changed, 6 insertions(+)
10
11
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
12
index XXXXXXX..XXXXXXX 100644
13
--- a/target/riscv/cpu.c
14
+++ b/target/riscv/cpu.c
15
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
16
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
17
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
18
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
19
+ ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
20
+ ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
21
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
22
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
23
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
24
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
25
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
26
+ ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
27
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
28
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
29
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
30
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
31
MULTI_EXT_CFG_BOOL("zvfh", ext_zvfh, false),
32
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
33
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
34
+ MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
35
36
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
37
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
38
+ MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
39
+ MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
40
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
41
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
42
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
43
--
44
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Tommy Wu <tommy.wu@sifive.com>
2
1
3
The boolean variable 'ext_smrnmi' is used to determine whether the
4
Smrnmi extension exists.
5
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250106054336.1878291-2-frank.chang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu_cfg.h | 1 +
13
1 file changed, 1 insertion(+)
14
15
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_cfg.h
18
+++ b/target/riscv/cpu_cfg.h
19
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
20
bool ext_ssaia;
21
bool ext_sscofpmf;
22
bool ext_smepmp;
23
+ bool ext_smrnmi;
24
bool ext_ssnpm;
25
bool ext_smnpm;
26
bool ext_smmpm;
27
--
28
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Tommy Wu <tommy.wu@sifive.com>
2
1
3
The Smrnmi extension adds the 'mnscratch', 'mnepc', 'mncause',
4
'mnstatus' CSRs.
5
6
Signed-off-by: Frank Chang <frank.chang@sifive.com>
7
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250106054336.1878291-3-frank.chang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu.h | 7 ++++
13
target/riscv/cpu_bits.h | 11 ++++++
14
target/riscv/cpu.c | 5 +++
15
target/riscv/csr.c | 82 +++++++++++++++++++++++++++++++++++++++++
16
4 files changed, 105 insertions(+)
17
18
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu.h
21
+++ b/target/riscv/cpu.h
22
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
23
uint64_t kvm_timer_state;
24
uint64_t kvm_timer_frequency;
25
#endif /* CONFIG_KVM */
26
+
27
+ /* RNMI */
28
+ target_ulong mnscratch;
29
+ target_ulong mnepc;
30
+ target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
31
+ target_ulong mnstatus;
32
+ target_ulong rnmip;
33
};
34
35
/*
36
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
37
index XXXXXXX..XXXXXXX 100644
38
--- a/target/riscv/cpu_bits.h
39
+++ b/target/riscv/cpu_bits.h
40
@@ -XXX,XX +XXX,XX @@
41
#define CSR_PMPADDR14 0x3be
42
#define CSR_PMPADDR15 0x3bf
43
44
+/* RNMI */
45
+#define CSR_MNSCRATCH 0x740
46
+#define CSR_MNEPC 0x741
47
+#define CSR_MNCAUSE 0x742
48
+#define CSR_MNSTATUS 0x744
49
+
50
/* Debug/Trace Registers (shared with Debug Mode) */
51
#define CSR_TSELECT 0x7a0
52
#define CSR_TDATA1 0x7a1
53
@@ -XXX,XX +XXX,XX @@ typedef enum {
54
#define SATP64_ASID 0x0FFFF00000000000ULL
55
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
56
57
+/* RNMI mnstatus CSR mask */
58
+#define MNSTATUS_NMIE 0x00000008
59
+#define MNSTATUS_MNPV 0x00000080
60
+#define MNSTATUS_MNPP 0x00001800
61
+
62
/* VM modes (satp.mode) privileged ISA 1.10 */
63
#define VM_1_10_MBARE 0
64
#define VM_1_10_SV32 1
65
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
66
index XXXXXXX..XXXXXXX 100644
67
--- a/target/riscv/cpu.c
68
+++ b/target/riscv/cpu.c
69
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
70
riscv_trigger_reset_hold(env);
71
}
72
73
+ if (cpu->cfg.ext_smrnmi) {
74
+ env->rnmip = 0;
75
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
76
+ }
77
+
78
if (kvm_enabled()) {
79
kvm_riscv_reset_vcpu(cpu);
80
}
81
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/riscv/csr.c
84
+++ b/target/riscv/csr.c
85
@@ -XXX,XX +XXX,XX @@ static RISCVException debug(CPURISCVState *env, int csrno)
86
87
return RISCV_EXCP_ILLEGAL_INST;
88
}
89
+
90
+static RISCVException rnmi(CPURISCVState *env, int csrno)
91
+{
92
+ RISCVCPU *cpu = env_archcpu(env);
93
+
94
+ if (cpu->cfg.ext_smrnmi) {
95
+ return RISCV_EXCP_NONE;
96
+ }
97
+
98
+ return RISCV_EXCP_ILLEGAL_INST;
99
+}
100
#endif
101
102
static RISCVException seed(CPURISCVState *env, int csrno)
103
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcontext(CPURISCVState *env, int csrno,
104
return RISCV_EXCP_NONE;
105
}
106
107
+static RISCVException read_mnscratch(CPURISCVState *env, int csrno,
108
+ target_ulong *val)
109
+{
110
+ *val = env->mnscratch;
111
+ return RISCV_EXCP_NONE;
112
+}
113
+
114
+static int write_mnscratch(CPURISCVState *env, int csrno, target_ulong val)
115
+{
116
+ env->mnscratch = val;
117
+ return RISCV_EXCP_NONE;
118
+}
119
+
120
+static int read_mnepc(CPURISCVState *env, int csrno, target_ulong *val)
121
+{
122
+ *val = env->mnepc;
123
+ return RISCV_EXCP_NONE;
124
+}
125
+
126
+static int write_mnepc(CPURISCVState *env, int csrno, target_ulong val)
127
+{
128
+ env->mnepc = val;
129
+ return RISCV_EXCP_NONE;
130
+}
131
+
132
+static int read_mncause(CPURISCVState *env, int csrno, target_ulong *val)
133
+{
134
+ *val = env->mncause;
135
+ return RISCV_EXCP_NONE;
136
+}
137
+
138
+static int write_mncause(CPURISCVState *env, int csrno, target_ulong val)
139
+{
140
+ env->mncause = val;
141
+ return RISCV_EXCP_NONE;
142
+}
143
+
144
+static int read_mnstatus(CPURISCVState *env, int csrno, target_ulong *val)
145
+{
146
+ *val = env->mnstatus;
147
+ return RISCV_EXCP_NONE;
148
+}
149
+
150
+static int write_mnstatus(CPURISCVState *env, int csrno, target_ulong val)
151
+{
152
+ target_ulong mask = (MNSTATUS_NMIE | MNSTATUS_MNPP);
153
+
154
+ if (riscv_has_ext(env, RVH)) {
155
+ /* Flush tlb on mnstatus fields that affect VM. */
156
+ if ((val ^ env->mnstatus) & MNSTATUS_MNPV) {
157
+ tlb_flush(env_cpu(env));
158
+ }
159
+
160
+ mask |= MNSTATUS_MNPV;
161
+ }
162
+
163
+ /* mnstatus.mnie can only be cleared by hardware. */
164
+ env->mnstatus = (env->mnstatus & MNSTATUS_NMIE) | (val & mask);
165
+ return RISCV_EXCP_NONE;
166
+}
167
+
168
#endif
169
170
/* Crypto Extension */
171
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
172
write_sstateen_1_3,
173
.min_priv_ver = PRIV_VERSION_1_12_0 },
174
175
+ /* RNMI */
176
+ [CSR_MNSCRATCH] = { "mnscratch", rnmi, read_mnscratch, write_mnscratch,
177
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
178
+ [CSR_MNEPC] = { "mnepc", rnmi, read_mnepc, write_mnepc,
179
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
180
+ [CSR_MNCAUSE] = { "mncause", rnmi, read_mncause, write_mncause,
181
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
182
+ [CSR_MNSTATUS] = { "mnstatus", rnmi, read_mnstatus, write_mnstatus,
183
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
184
+
185
/* Supervisor Trap Setup */
186
[CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
187
NULL, read_sstatus_i128 },
188
--
189
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Tommy Wu <tommy.wu@sifive.com>
2
1
3
Because the RNMI interrupt trap handler address is implementation defined.
4
We add the 'rnmi-interrupt-vector' and 'rnmi-exception-vector' as the property
5
of the harts. It’s very easy for users to set the address based on their
6
expectation. This patch also adds the functionality to handle the RNMI signals.
7
8
Signed-off-by: Frank Chang <frank.chang@sifive.com>
9
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
10
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250106054336.1878291-4-frank.chang@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
include/hw/riscv/riscv_hart.h | 4 ++
16
target/riscv/cpu.h | 3 ++
17
target/riscv/cpu_bits.h | 12 +++++
18
hw/riscv/riscv_hart.c | 41 ++++++++++++++++
19
target/riscv/cpu.c | 11 +++++
20
target/riscv/cpu_helper.c | 88 ++++++++++++++++++++++++++++++++---
21
6 files changed, 152 insertions(+), 7 deletions(-)
22
23
diff --git a/include/hw/riscv/riscv_hart.h b/include/hw/riscv/riscv_hart.h
24
index XXXXXXX..XXXXXXX 100644
25
--- a/include/hw/riscv/riscv_hart.h
26
+++ b/include/hw/riscv/riscv_hart.h
27
@@ -XXX,XX +XXX,XX @@ struct RISCVHartArrayState {
28
uint32_t hartid_base;
29
char *cpu_type;
30
uint64_t resetvec;
31
+ uint32_t num_rnmi_irqvec;
32
+ uint64_t *rnmi_irqvec;
33
+ uint32_t num_rnmi_excpvec;
34
+ uint64_t *rnmi_excpvec;
35
RISCVCPU *harts;
36
};
37
38
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
39
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/cpu.h
41
+++ b/target/riscv/cpu.h
42
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
43
target_ulong mncause; /* mncause without bit XLEN-1 set to 1 */
44
target_ulong mnstatus;
45
target_ulong rnmip;
46
+ uint64_t rnmi_irqvec;
47
+ uint64_t rnmi_excpvec;
48
};
49
50
/*
51
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
52
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts);
53
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
54
uint64_t value);
55
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level);
56
void riscv_cpu_interrupt(CPURISCVState *env);
57
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
58
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
59
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
60
index XXXXXXX..XXXXXXX 100644
61
--- a/target/riscv/cpu_bits.h
62
+++ b/target/riscv/cpu_bits.h
63
@@ -XXX,XX +XXX,XX @@ typedef enum {
64
/* Default Reset Vector address */
65
#define DEFAULT_RSTVEC 0x1000
66
67
+/* Default RNMI Interrupt Vector address */
68
+#define DEFAULT_RNMI_IRQVEC 0x0
69
+
70
+/* Default RNMI Exception Vector address */
71
+#define DEFAULT_RNMI_EXCPVEC 0x0
72
+
73
/* Exception causes */
74
typedef enum RISCVException {
75
RISCV_EXCP_NONE = -1, /* sentinel value */
76
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
77
/* -1 is due to bit zero of hgeip and hgeie being ROZ. */
78
#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1)
79
80
+/* RNMI causes */
81
+#define RNMI_MAX 16
82
+
83
/* mip masks */
84
#define MIP_USIP (1 << IRQ_U_SOFT)
85
#define MIP_SSIP (1 << IRQ_S_SOFT)
86
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
87
#define MHPMEVENT_IDX_MASK 0xFFFFF
88
#define MHPMEVENT_SSCOF_RESVD 16
89
90
+/* RISC-V-specific interrupt pending bits. */
91
+#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
92
+
93
/* JVT CSR bits */
94
#define JVT_MODE 0x3F
95
#define JVT_BASE (~0x3F)
96
diff --git a/hw/riscv/riscv_hart.c b/hw/riscv/riscv_hart.c
97
index XXXXXXX..XXXXXXX 100644
98
--- a/hw/riscv/riscv_hart.c
99
+++ b/hw/riscv/riscv_hart.c
100
@@ -XXX,XX +XXX,XX @@
101
#include "target/riscv/cpu.h"
102
#include "hw/qdev-properties.h"
103
#include "hw/riscv/riscv_hart.h"
104
+#include "qemu/error-report.h"
105
106
static const Property riscv_harts_props[] = {
107
DEFINE_PROP_UINT32("num-harts", RISCVHartArrayState, num_harts, 1),
108
@@ -XXX,XX +XXX,XX @@ static const Property riscv_harts_props[] = {
109
DEFINE_PROP_STRING("cpu-type", RISCVHartArrayState, cpu_type),
110
DEFINE_PROP_UINT64("resetvec", RISCVHartArrayState, resetvec,
111
DEFAULT_RSTVEC),
112
+
113
+ /*
114
+ * Smrnmi implementation-defined interrupt and exception trap handlers.
115
+ *
116
+ * When an RNMI interrupt is detected, the hart then enters M-mode and
117
+ * jumps to the address defined by "rnmi-interrupt-vector".
118
+ *
119
+ * When the hart encounters an exception while executing in M-mode with
120
+ * the mnstatus.NMIE bit clear, the hart then jumps to the address
121
+ * defined by "rnmi-exception-vector".
122
+ */
123
+ DEFINE_PROP_ARRAY("rnmi-interrupt-vector", RISCVHartArrayState,
124
+ num_rnmi_irqvec, rnmi_irqvec, qdev_prop_uint64,
125
+ uint64_t),
126
+ DEFINE_PROP_ARRAY("rnmi-exception-vector", RISCVHartArrayState,
127
+ num_rnmi_excpvec, rnmi_excpvec, qdev_prop_uint64,
128
+ uint64_t),
129
};
130
131
static void riscv_harts_cpu_reset(void *opaque)
132
@@ -XXX,XX +XXX,XX @@ static bool riscv_hart_realize(RISCVHartArrayState *s, int idx,
133
{
134
object_initialize_child(OBJECT(s), "harts[*]", &s->harts[idx], cpu_type);
135
qdev_prop_set_uint64(DEVICE(&s->harts[idx]), "resetvec", s->resetvec);
136
+
137
+ if (s->harts[idx].cfg.ext_smrnmi) {
138
+ if (idx < s->num_rnmi_irqvec) {
139
+ qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
140
+ "rnmi-interrupt-vector", s->rnmi_irqvec[idx]);
141
+ }
142
+
143
+ if (idx < s->num_rnmi_excpvec) {
144
+ qdev_prop_set_uint64(DEVICE(&s->harts[idx]),
145
+ "rnmi-exception-vector", s->rnmi_excpvec[idx]);
146
+ }
147
+ } else {
148
+ if (s->num_rnmi_irqvec > 0) {
149
+ warn_report_once("rnmi-interrupt-vector property is ignored "
150
+ "because Smrnmi extension is not enabled.");
151
+ }
152
+
153
+ if (s->num_rnmi_excpvec > 0) {
154
+ warn_report_once("rnmi-exception-vector property is ignored "
155
+ "because Smrnmi extension is not enabled.");
156
+ }
157
+ }
158
+
159
s->harts[idx].env.mhartid = s->hartid_base + idx;
160
qemu_register_reset(riscv_harts_cpu_reset, &s->harts[idx]);
161
return qdev_realize(DEVICE(&s->harts[idx]), NULL, errp);
162
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
163
index XXXXXXX..XXXXXXX 100644
164
--- a/target/riscv/cpu.c
165
+++ b/target/riscv/cpu.c
166
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_set_irq(void *opaque, int irq, int level)
167
g_assert_not_reached();
168
}
169
}
170
+
171
+static void riscv_cpu_set_nmi(void *opaque, int irq, int level)
172
+{
173
+ riscv_cpu_set_rnmi(RISCV_CPU(opaque), irq, level);
174
+}
175
#endif /* CONFIG_USER_ONLY */
176
177
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
178
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_init(Object *obj)
179
#ifndef CONFIG_USER_ONLY
180
qdev_init_gpio_in(DEVICE(obj), riscv_cpu_set_irq,
181
IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
182
+ qdev_init_gpio_in_named(DEVICE(cpu), riscv_cpu_set_nmi,
183
+ "riscv.cpu.rnmi", RNMI_MAX);
184
#endif /* CONFIG_USER_ONLY */
185
186
general_user_opts = g_hash_table_new(g_str_hash, g_str_equal);
187
@@ -XXX,XX +XXX,XX @@ static const Property riscv_cpu_properties[] = {
188
189
#ifndef CONFIG_USER_ONLY
190
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
191
+ DEFINE_PROP_UINT64("rnmi-interrupt-vector", RISCVCPU, env.rnmi_irqvec,
192
+ DEFAULT_RNMI_IRQVEC),
193
+ DEFINE_PROP_UINT64("rnmi-exception-vector", RISCVCPU, env.rnmi_excpvec,
194
+ DEFAULT_RNMI_EXCPVEC),
195
#endif
196
197
DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
198
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
199
index XXXXXXX..XXXXXXX 100644
200
--- a/target/riscv/cpu_helper.c
201
+++ b/target/riscv/cpu_helper.c
202
@@ -XXX,XX +XXX,XX @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
203
uint64_t vsbits, irq_delegated;
204
int virq;
205
206
+ /* Priority: RNMI > Other interrupt. */
207
+ if (riscv_cpu_cfg(env)->ext_smrnmi) {
208
+ /* If mnstatus.NMIE == 0, all interrupts are disabled. */
209
+ if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
210
+ return RISCV_EXCP_NONE;
211
+ }
212
+
213
+ if (env->rnmip) {
214
+ return ctz64(env->rnmip); /* since non-zero */
215
+ }
216
+ }
217
+
218
/* Determine interrupt enable state of all privilege modes */
219
if (env->virt_enabled) {
220
mie = 1;
221
@@ -XXX,XX +XXX,XX @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
222
223
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
224
{
225
- if (interrupt_request & CPU_INTERRUPT_HARD) {
226
+ uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
227
+
228
+ if (interrupt_request & mask) {
229
RISCVCPU *cpu = RISCV_CPU(cs);
230
CPURISCVState *env = &cpu->env;
231
int interruptno = riscv_cpu_local_irq_pending(env);
232
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
233
env->geilen = geilen;
234
}
235
236
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
237
+{
238
+ CPURISCVState *env = &cpu->env;
239
+ CPUState *cs = CPU(cpu);
240
+ bool release_lock = false;
241
+
242
+ if (!bql_locked()) {
243
+ release_lock = true;
244
+ bql_lock();
245
+ }
246
+
247
+ if (level) {
248
+ env->rnmip |= 1 << irq;
249
+ cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
250
+ } else {
251
+ env->rnmip &= ~(1 << irq);
252
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
253
+ }
254
+
255
+ if (release_lock) {
256
+ bql_unlock();
257
+ }
258
+}
259
+
260
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
261
{
262
CPURISCVState *env = &cpu->env;
263
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
264
bool write_gva = false;
265
bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
266
uint64_t s;
267
+ int mode;
268
269
/*
270
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
271
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
272
target_ulong htval = 0;
273
target_ulong mtval2 = 0;
274
int sxlen = 0;
275
- int mxlen = 0;
276
+ int mxlen = 16 << riscv_cpu_mxl(env);
277
+ bool nnmi_excep = false;
278
+
279
+ if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
280
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
281
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV,
282
+ env->virt_enabled);
283
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP,
284
+ env->priv);
285
+ env->mncause = cause | ((target_ulong)1U << (mxlen - 1));
286
+ env->mnepc = env->pc;
287
+ env->pc = env->rnmi_irqvec;
288
+
289
+ /* Trapping to M mode, virt is disabled */
290
+ riscv_cpu_set_mode(env, PRV_M, false);
291
+
292
+ return;
293
+ }
294
295
if (!async) {
296
/* set tval to badaddr for traps with address information */
297
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
298
__func__, env->mhartid, async, cause, env->pc, tval,
299
riscv_cpu_get_trap_name(cause, async));
300
301
- if (env->priv <= PRV_S && cause < 64 &&
302
- (((deleg >> cause) & 1) || s_injected || vs_injected)) {
303
+ mode = env->priv <= PRV_S && cause < 64 &&
304
+ (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
305
+
306
+ if (mode == PRV_S) {
307
/* handle the trap in S-mode */
308
/* save elp status */
309
if (cpu_get_fcfien(env)) {
310
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
311
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
312
riscv_cpu_set_mode(env, PRV_S, virt);
313
} else {
314
+ /*
315
+ * If the hart encounters an exception while executing in M-mode
316
+ * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
317
+ */
318
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
319
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
320
+ !async;
321
+
322
/* handle the trap in M-mode */
323
/* save elp status */
324
if (cpu_get_fcfien(env)) {
325
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
326
s = set_field(s, MSTATUS_MPP, env->priv);
327
s = set_field(s, MSTATUS_MIE, 0);
328
env->mstatus = s;
329
- mxlen = 16 << riscv_cpu_mxl(env);
330
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
331
env->mepc = env->pc;
332
env->mtval = tval;
333
env->mtval2 = mtval2;
334
env->mtinst = tinst;
335
- env->pc = (env->mtvec >> 2 << 2) +
336
- ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
337
+
338
+ /*
339
+ * For RNMI exception, program counter is set to the RNMI exception
340
+ * trap handler address.
341
+ */
342
+ if (nnmi_excep) {
343
+ env->pc = env->rnmi_excpvec;
344
+ } else {
345
+ env->pc = (env->mtvec >> 2 << 2) +
346
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
347
+ }
348
riscv_cpu_set_mode(env, PRV_M, virt);
349
}
350
351
--
352
2.47.1
353
354
diff view generated by jsdifflib
Deleted patch
1
From: Tommy Wu <tommy.wu@sifive.com>
2
1
3
This patch adds a new instruction 'mnret'. 'mnret' is an M-mode-only
4
instruction that uses the values in `mnepc` and `mnstatus` to return to the
5
program counter, privilege mode, and virtualization mode of the
6
interrupted context.
7
8
Signed-off-by: Frank Chang <frank.chang@sifive.com>
9
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
10
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250106054336.1878291-5-frank.chang@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/helper.h | 1 +
16
target/riscv/insn32.decode | 3 ++
17
target/riscv/op_helper.c | 45 ++++++++++++++++---
18
.../riscv/insn_trans/trans_privileged.c.inc | 20 +++++++++
19
4 files changed, 64 insertions(+), 5 deletions(-)
20
21
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/helper.h
24
+++ b/target/riscv/helper.h
25
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
26
#ifndef CONFIG_USER_ONLY
27
DEF_HELPER_1(sret, tl, env)
28
DEF_HELPER_1(mret, tl, env)
29
+DEF_HELPER_1(mnret, tl, env)
30
DEF_HELPER_1(wfi, void, env)
31
DEF_HELPER_1(wrs_nto, void, env)
32
DEF_HELPER_1(tlb_flush, void, env)
33
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
34
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/insn32.decode
36
+++ b/target/riscv/insn32.decode
37
@@ -XXX,XX +XXX,XX @@ wfi 0001000 00101 00000 000 00000 1110011
38
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
39
sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
40
41
+# *** NMI ***
42
+mnret 0111000 00010 00000 000 00000 1110011
43
+
44
# *** RV32I Base Instruction Set ***
45
lui .................... ..... 0110111 @u
46
{
47
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/target/riscv/op_helper.c
50
+++ b/target/riscv/op_helper.c
51
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
52
return retpc;
53
}
54
55
-target_ulong helper_mret(CPURISCVState *env)
56
+static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
57
+ target_ulong prev_priv)
58
{
59
if (!(env->priv >= PRV_M)) {
60
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
61
}
62
63
- target_ulong retpc = env->mepc;
64
if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
65
riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
66
}
67
68
- uint64_t mstatus = env->mstatus;
69
- target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
70
-
71
if (riscv_cpu_cfg(env)->pmp &&
72
!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
73
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
74
}
75
+}
76
+
77
+target_ulong helper_mret(CPURISCVState *env)
78
+{
79
+ target_ulong retpc = env->mepc;
80
+ uint64_t mstatus = env->mstatus;
81
+ target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
82
+
83
+ check_ret_from_m_mode(env, retpc, prev_priv);
84
85
target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
86
(prev_priv != PRV_M);
87
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mret(CPURISCVState *env)
88
return retpc;
89
}
90
91
+target_ulong helper_mnret(CPURISCVState *env)
92
+{
93
+ target_ulong retpc = env->mnepc;
94
+ target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP);
95
+ target_ulong prev_virt;
96
+
97
+ check_ret_from_m_mode(env, retpc, prev_priv);
98
+
99
+ prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) &&
100
+ (prev_priv != PRV_M);
101
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, true);
102
+
103
+ /*
104
+ * If MNRET changes the privilege mode to a mode
105
+ * less privileged than M, it also sets mstatus.MPRV to 0.
106
+ */
107
+ if (prev_priv < PRV_M) {
108
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
109
+ }
110
+
111
+ if (riscv_has_ext(env, RVH) && prev_virt) {
112
+ riscv_cpu_swap_hypervisor_regs(env);
113
+ }
114
+
115
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
116
+
117
+ return retpc;
118
+}
119
+
120
void helper_wfi(CPURISCVState *env)
121
{
122
CPUState *cs = env_cpu(env);
123
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
124
index XXXXXXX..XXXXXXX 100644
125
--- a/target/riscv/insn_trans/trans_privileged.c.inc
126
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
127
@@ -XXX,XX +XXX,XX @@
128
* this program. If not, see <http://www.gnu.org/licenses/>.
129
*/
130
131
+#define REQUIRE_SMRNMI(ctx) do { \
132
+ if (!ctx->cfg_ptr->ext_smrnmi) { \
133
+ return false; \
134
+ } \
135
+} while (0)
136
+
137
static bool trans_ecall(DisasContext *ctx, arg_ecall *a)
138
{
139
/* always generates U-level ECALL, fixed in do_interrupt handler */
140
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
141
#endif
142
}
143
144
+static bool trans_mnret(DisasContext *ctx, arg_mnret *a)
145
+{
146
+#ifndef CONFIG_USER_ONLY
147
+ REQUIRE_SMRNMI(ctx);
148
+ decode_save_opc(ctx, 0);
149
+ gen_helper_mnret(cpu_pc, tcg_env);
150
+ tcg_gen_exit_tb(NULL, 0); /* no chaining */
151
+ ctx->base.is_jmp = DISAS_NORETURN;
152
+ return true;
153
+#else
154
+ return false;
155
+#endif
156
+}
157
+
158
static bool trans_wfi(DisasContext *ctx, arg_wfi *a)
159
{
160
#ifndef CONFIG_USER_ONLY
161
--
162
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Tommy Wu <tommy.wu@sifive.com>
2
1
3
This adds the properties for ISA extension Smrnmi.
4
5
Also, when Smrnmi is present, the firmware (e.g., OpenSBI) must set
6
mnstatus.NMIE to 1 before enabling any interrupts. Otherwise, all
7
interrupts will be disabled. Since our current OpenSBI does not
8
support Smrnmi yet, let's disable Smrnmi for the 'max' type CPU for
9
now. We can re-enable it once OpenSBI includes proper support for it.
10
11
Signed-off-by: Frank Chang <frank.chang@sifive.com>
12
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
13
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
16
Message-ID: <20250106054336.1878291-6-frank.chang@sifive.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
19
target/riscv/cpu.c | 2 ++
20
target/riscv/tcg/tcg-cpu.c | 9 +++++++++
21
2 files changed, 11 insertions(+)
22
23
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
24
index XXXXXXX..XXXXXXX 100644
25
--- a/target/riscv/cpu.c
26
+++ b/target/riscv/cpu.c
27
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
28
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
29
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
30
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
31
+ ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
32
ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
33
ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
34
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
35
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
36
37
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
38
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
39
+ MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
40
MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
41
MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
42
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
43
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/tcg/tcg-cpu.c
46
+++ b/target/riscv/tcg/tcg-cpu.c
47
@@ -XXX,XX +XXX,XX @@ static void riscv_init_max_cpu_extensions(Object *obj)
48
if (env->misa_mxl != MXL_RV32) {
49
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
50
}
51
+
52
+ /*
53
+ * ext_smrnmi requires OpenSBI changes that our current
54
+ * image does not have. Disable it for now.
55
+ */
56
+ if (cpu->cfg.ext_smrnmi) {
57
+ isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
58
+ qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
59
+ }
60
}
61
62
static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
63
--
64
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Frank Chang <frank.chang@sifive.com>
2
1
3
Zicfilp extension introduces the MNPELP (bit 9) in mnstatus.
4
The MNPELP field holds the previous ELP.
5
6
When a RNMI trap is delivered, the MNPELP is set to ELP and ELP set
7
to NO_LP_EXPECTED. Upon a mnret, if the mnstatus.MNPP holds the
8
value y, then ELP is set to the value of MNPELP if yLPE is 1;
9
otherwise, it is set to NO_LP_EXPECTED.
10
11
Signed-off-by: Frank Chang <frank.chang@sifive.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-ID: <20250106054336.1878291-7-frank.chang@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
target/riscv/cpu_bits.h | 1 +
18
target/riscv/cpu_helper.c | 11 ++++++++++-
19
target/riscv/op_helper.c | 9 +++++++++
20
3 files changed, 20 insertions(+), 1 deletion(-)
21
22
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
23
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu_bits.h
25
+++ b/target/riscv/cpu_bits.h
26
@@ -XXX,XX +XXX,XX @@ typedef enum {
27
/* RNMI mnstatus CSR mask */
28
#define MNSTATUS_NMIE 0x00000008
29
#define MNSTATUS_MNPV 0x00000080
30
+#define MNSTATUS_MNPELP 0x00000200
31
#define MNSTATUS_MNPP 0x00001800
32
33
/* VM modes (satp.mode) privileged ISA 1.10 */
34
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
35
index XXXXXXX..XXXXXXX 100644
36
--- a/target/riscv/cpu_helper.c
37
+++ b/target/riscv/cpu_helper.c
38
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
39
env->mnepc = env->pc;
40
env->pc = env->rnmi_irqvec;
41
42
+ if (cpu_get_fcfien(env)) {
43
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
44
+ }
45
+
46
/* Trapping to M mode, virt is disabled */
47
riscv_cpu_set_mode(env, PRV_M, false);
48
49
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
50
/* handle the trap in M-mode */
51
/* save elp status */
52
if (cpu_get_fcfien(env)) {
53
- env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
54
+ if (nnmi_excep) {
55
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
56
+ env->elp);
57
+ } else {
58
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
59
+ }
60
}
61
62
if (riscv_has_ext(env, RVH)) {
63
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
64
index XXXXXXX..XXXXXXX 100644
65
--- a/target/riscv/op_helper.c
66
+++ b/target/riscv/op_helper.c
67
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mnret(CPURISCVState *env)
68
69
riscv_cpu_set_mode(env, prev_priv, prev_virt);
70
71
+ /*
72
+ * If forward cfi enabled for new priv, restore elp status
73
+ * and clear mnpelp in mnstatus
74
+ */
75
+ if (cpu_get_fcfien(env)) {
76
+ env->elp = get_field(env->mnstatus, MNSTATUS_MNPELP);
77
+ }
78
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, 0);
79
+
80
return retpc;
81
}
82
83
--
84
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
1
3
Keep kvm_riscv_get_timebase_frequency() prototype aligned with
4
the other ones declared in "kvm_riscv.h", have it take a RISCVCPU
5
cpu as argument. Include "target/riscv/cpu-qom.h" which declares
6
the RISCVCPU typedef.
7
8
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250112231344.34632-2-philmd@linaro.org>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/kvm/kvm_riscv.h | 4 +++-
14
hw/riscv/virt.c | 2 +-
15
target/riscv/kvm/kvm-cpu.c | 4 ++--
16
3 files changed, 6 insertions(+), 4 deletions(-)
17
18
diff --git a/target/riscv/kvm/kvm_riscv.h b/target/riscv/kvm/kvm_riscv.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/kvm/kvm_riscv.h
21
+++ b/target/riscv/kvm/kvm_riscv.h
22
@@ -XXX,XX +XXX,XX @@
23
#ifndef QEMU_KVM_RISCV_H
24
#define QEMU_KVM_RISCV_H
25
26
+#include "target/riscv/cpu-qom.h"
27
+
28
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
29
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
30
void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
31
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
32
void riscv_kvm_aplic_request(void *opaque, int irq, int level);
33
int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state);
34
void riscv_kvm_cpu_finalize_features(RISCVCPU *cpu, Error **errp);
35
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs);
36
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu);
37
38
#endif
39
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
40
index XXXXXXX..XXXXXXX 100644
41
--- a/hw/riscv/virt.c
42
+++ b/hw/riscv/virt.c
43
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
44
qemu_fdt_add_subnode(ms->fdt, "/cpus");
45
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "timebase-frequency",
46
kvm_enabled() ?
47
- kvm_riscv_get_timebase_frequency(first_cpu) :
48
+ kvm_riscv_get_timebase_frequency(RISCV_CPU(first_cpu)) :
49
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ);
50
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
51
qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 0x1);
52
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/riscv/kvm/kvm-cpu.c
55
+++ b/target/riscv/kvm/kvm-cpu.c
56
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
57
env->kvm_timer_dirty = false;
58
}
59
60
-uint64_t kvm_riscv_get_timebase_frequency(CPUState *cs)
61
+uint64_t kvm_riscv_get_timebase_frequency(RISCVCPU *cpu)
62
{
63
uint64_t reg;
64
65
- KVM_RISCV_GET_TIMER(cs, frequency, reg);
66
+ KVM_RISCV_GET_TIMER(CPU(cpu), frequency, reg);
67
68
return reg;
69
}
70
--
71
2.47.1
72
73
diff view generated by jsdifflib
Deleted patch
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
2
1
3
This adds the properties for sxcsrind. Definitions of new registers and
4
implementations will come with future patches.
5
6
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Atish Patra <atishp@rivosinc.com>
10
Message-ID: <20250110-counter_delegation-v5-1-e83d797ae294@rivosinc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/cpu_cfg.h | 2 ++
14
target/riscv/cpu.c | 2 ++
15
2 files changed, 4 insertions(+)
16
17
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu_cfg.h
20
+++ b/target/riscv/cpu_cfg.h
21
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
22
bool ext_smstateen;
23
bool ext_sstc;
24
bool ext_smcntrpmf;
25
+ bool ext_smcsrind;
26
+ bool ext_sscsrind;
27
bool ext_svadu;
28
bool ext_svinval;
29
bool ext_svnapot;
30
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/riscv/cpu.c
33
+++ b/target/riscv/cpu.c
34
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
35
ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
36
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
37
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
38
+ ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
39
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
40
ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
41
ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
42
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
43
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
44
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
45
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
46
+ ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
47
ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
48
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
49
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
50
--
51
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
2
1
3
This adds the indirect access registers required by sscsrind/smcsrind
4
and the operations on them. Note that xiselect and xireg are used for
5
both AIA and sxcsrind, and the behavior of accessing them depends on
6
whether each extension is enabled and the value stored in xiselect.
7
8
Co-developed-by: Atish Patra <atishp@rivosinc.com>
9
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
10
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Atish Patra <atishp@rivosinc.com>
13
Message-ID: <20250110-counter_delegation-v5-4-e83d797ae294@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
target/riscv/cpu_bits.h | 28 +++++++-
17
target/riscv/csr.c | 144 ++++++++++++++++++++++++++++++++++++++--
18
2 files changed, 166 insertions(+), 6 deletions(-)
19
20
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/cpu_bits.h
23
+++ b/target/riscv/cpu_bits.h
24
@@ -XXX,XX +XXX,XX @@
25
#define CSR_MISELECT 0x350
26
#define CSR_MIREG 0x351
27
28
+/* Machine Indirect Register Alias */
29
+#define CSR_MIREG2 0x352
30
+#define CSR_MIREG3 0x353
31
+#define CSR_MIREG4 0x355
32
+#define CSR_MIREG5 0x356
33
+#define CSR_MIREG6 0x357
34
+
35
/* Machine-Level Interrupts (AIA) */
36
#define CSR_MTOPEI 0x35c
37
#define CSR_MTOPI 0xfb0
38
@@ -XXX,XX +XXX,XX @@
39
#define CSR_SISELECT 0x150
40
#define CSR_SIREG 0x151
41
42
+/* Supervisor Indirect Register Alias */
43
+#define CSR_SIREG2 0x152
44
+#define CSR_SIREG3 0x153
45
+#define CSR_SIREG4 0x155
46
+#define CSR_SIREG5 0x156
47
+#define CSR_SIREG6 0x157
48
+
49
/* Supervisor-Level Interrupts (AIA) */
50
#define CSR_STOPEI 0x15c
51
#define CSR_STOPI 0xdb0
52
@@ -XXX,XX +XXX,XX @@
53
#define CSR_VSISELECT 0x250
54
#define CSR_VSIREG 0x251
55
56
+/* Virtual Supervisor Indirect Alias */
57
+#define CSR_VSIREG2 0x252
58
+#define CSR_VSIREG3 0x253
59
+#define CSR_VSIREG4 0x255
60
+#define CSR_VSIREG5 0x256
61
+#define CSR_VSIREG6 0x257
62
+
63
/* VS-Level Interrupts (H-extension with AIA) */
64
#define CSR_VSTOPEI 0x25c
65
#define CSR_VSTOPI 0xeb0
66
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
67
#define ISELECT_IMSIC_EIE63 0xff
68
#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY
69
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
70
-#define ISELECT_MASK 0x1ff
71
+#define ISELECT_MASK_AIA 0x1ff
72
+
73
+/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
74
+#define ISELECT_MASK_SXCSRIND 0xfff
75
76
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
77
-#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1)
78
+#define ISELECT_IMSIC_TOPEI (ISELECT_MASK_AIA + 1)
79
80
/* IMSIC bits (AIA) */
81
#define IMSIC_TOPEI_IID_SHIFT 16
82
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
83
index XXXXXXX..XXXXXXX 100644
84
--- a/target/riscv/csr.c
85
+++ b/target/riscv/csr.c
86
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_any32(CPURISCVState *env, int csrno)
87
return any32(env, csrno);
88
}
89
90
+static RISCVException csrind_any(CPURISCVState *env, int csrno)
91
+{
92
+ if (!riscv_cpu_cfg(env)->ext_smcsrind) {
93
+ return RISCV_EXCP_ILLEGAL_INST;
94
+ }
95
+
96
+ return RISCV_EXCP_NONE;
97
+}
98
+
99
static RISCVException csrind_or_aia_any(CPURISCVState *env, int csrno)
100
{
101
if (!riscv_cpu_cfg(env)->ext_smaia && !riscv_cpu_cfg(env)->ext_smcsrind) {
102
@@ -XXX,XX +XXX,XX @@ static bool csrind_or_aia_extensions_present(CPURISCVState *env)
103
return csrind_extensions_present(env) || aia_extensions_present(env);
104
}
105
106
+static RISCVException csrind_smode(CPURISCVState *env, int csrno)
107
+{
108
+ if (!csrind_extensions_present(env)) {
109
+ return RISCV_EXCP_ILLEGAL_INST;
110
+ }
111
+
112
+ return smode(env, csrno);
113
+}
114
+
115
static RISCVException csrind_or_aia_smode(CPURISCVState *env, int csrno)
116
{
117
if (!csrind_or_aia_extensions_present(env)) {
118
@@ -XXX,XX +XXX,XX @@ static RISCVException hmode32(CPURISCVState *env, int csrno)
119
120
}
121
122
+static RISCVException csrind_hmode(CPURISCVState *env, int csrno)
123
+{
124
+ if (!csrind_extensions_present(env)) {
125
+ return RISCV_EXCP_ILLEGAL_INST;
126
+ }
127
+
128
+ return hmode(env, csrno);
129
+}
130
+
131
static RISCVException csrind_or_aia_hmode(CPURISCVState *env, int csrno)
132
{
133
if (!csrind_or_aia_extensions_present(env)) {
134
@@ -XXX,XX +XXX,XX @@ static int csrind_xlate_vs_csrno(CPURISCVState *env, int csrno)
135
case CSR_SISELECT:
136
return CSR_VSISELECT;
137
case CSR_SIREG:
138
- return CSR_VSIREG;
139
+ case CSR_SIREG2:
140
+ case CSR_SIREG3:
141
+ case CSR_SIREG4:
142
+ case CSR_SIREG5:
143
+ case CSR_SIREG6:
144
+ return CSR_VSIREG + (csrno - CSR_SIREG);
145
default:
146
return csrno;
147
};
148
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xiselect(CPURISCVState *env, int csrno,
149
*val = *iselect;
150
}
151
152
- wr_mask &= ISELECT_MASK;
153
+ if (riscv_cpu_cfg(env)->ext_smcsrind || riscv_cpu_cfg(env)->ext_sscsrind) {
154
+ wr_mask &= ISELECT_MASK_SXCSRIND;
155
+ } else {
156
+ wr_mask &= ISELECT_MASK_AIA;
157
+ }
158
+
159
if (wr_mask) {
160
*iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
161
}
162
@@ -XXX,XX +XXX,XX @@ done:
163
return RISCV_EXCP_NONE;
164
}
165
166
+/*
167
+ * rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
168
+ *
169
+ * Perform indirect access to xireg and xireg2-xireg6.
170
+ * This is a generic interface for all xireg CSRs. Apart from AIA, all other
171
+ * extension using csrind should be implemented here.
172
+ */
173
+static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
174
+ target_ulong isel, target_ulong *val,
175
+ target_ulong new_val, target_ulong wr_mask)
176
+{
177
+ return -EINVAL;
178
+}
179
+
180
+static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
181
+ target_ulong new_val, target_ulong wr_mask)
182
+{
183
+ bool virt = false;
184
+ int ret = -EINVAL;
185
+ target_ulong isel;
186
+
187
+ ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
188
+ if (ret != RISCV_EXCP_NONE) {
189
+ return ret;
190
+ }
191
+
192
+ /* Translate CSR number for VS-mode */
193
+ csrno = csrind_xlate_vs_csrno(env, csrno);
194
+
195
+ if (CSR_MIREG <= csrno && csrno <= CSR_MIREG6 &&
196
+ csrno != CSR_MIREG4 - 1) {
197
+ isel = env->miselect;
198
+ } else if (CSR_SIREG <= csrno && csrno <= CSR_SIREG6 &&
199
+ csrno != CSR_SIREG4 - 1) {
200
+ isel = env->siselect;
201
+ } else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
202
+ csrno != CSR_VSIREG4 - 1) {
203
+ isel = env->vsiselect;
204
+ virt = true;
205
+ } else {
206
+ goto done;
207
+ }
208
+
209
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
210
+
211
+done:
212
+ return (env->virt_enabled && virt) ?
213
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
214
+}
215
+
216
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
217
target_ulong *val, target_ulong new_val,
218
target_ulong wr_mask)
219
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
220
goto done;
221
};
222
223
+ /*
224
+ * Use the xiselect range to determine actual op on xireg.
225
+ *
226
+ * Since we only checked the existence of AIA or Indirect Access in the
227
+ * predicate, we should check the existence of the exact extension when
228
+ * we get to a specific range and return illegal instruction exception even
229
+ * in VS-mode.
230
+ */
231
if (xiselect_aia_range(isel)) {
232
return rmw_xireg_aia(env, csrno, isel, val, new_val, wr_mask);
233
+ } else if (riscv_cpu_cfg(env)->ext_smcsrind ||
234
+ riscv_cpu_cfg(env)->ext_sscsrind) {
235
+ return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
236
+ } else {
237
+ return RISCV_EXCP_ILLEGAL_INST;
238
}
239
240
done:
241
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
242
wr_mask |= SMSTATEEN0_P1P13;
243
}
244
245
- if (riscv_cpu_cfg(env)->ext_smaia) {
246
+ if (riscv_cpu_cfg(env)->ext_smaia || riscv_cpu_cfg(env)->ext_smcsrind) {
247
wr_mask |= SMSTATEEN0_SVSLCT;
248
}
249
250
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
251
wr_mask |= SMSTATEEN0_FCSR;
252
}
253
254
- if (riscv_cpu_cfg(env)->ext_ssaia) {
255
+ if (riscv_cpu_cfg(env)->ext_ssaia || riscv_cpu_cfg(env)->ext_sscsrind) {
256
wr_mask |= SMSTATEEN0_SVSLCT;
257
}
258
259
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
260
[CSR_MIREG] = { "mireg", csrind_or_aia_any, NULL, NULL,
261
rmw_xireg },
262
263
+ /* Machine Indirect Register Alias */
264
+ [CSR_MIREG2] = { "mireg2", csrind_any, NULL, NULL, rmw_xiregi,
265
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
266
+ [CSR_MIREG3] = { "mireg3", csrind_any, NULL, NULL, rmw_xiregi,
267
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
268
+ [CSR_MIREG4] = { "mireg4", csrind_any, NULL, NULL, rmw_xiregi,
269
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
270
+ [CSR_MIREG5] = { "mireg5", csrind_any, NULL, NULL, rmw_xiregi,
271
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
272
+ [CSR_MIREG6] = { "mireg6", csrind_any, NULL, NULL, rmw_xiregi,
273
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
274
+
275
/* Machine-Level Interrupts (AIA) */
276
[CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
277
[CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
278
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
279
[CSR_SIREG] = { "sireg", csrind_or_aia_smode, NULL, NULL,
280
rmw_xireg },
281
282
+ /* Supervisor Indirect Register Alias */
283
+ [CSR_SIREG2] = { "sireg2", csrind_smode, NULL, NULL, rmw_xiregi,
284
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
285
+ [CSR_SIREG3] = { "sireg3", csrind_smode, NULL, NULL, rmw_xiregi,
286
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
287
+ [CSR_SIREG4] = { "sireg4", csrind_smode, NULL, NULL, rmw_xiregi,
288
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
289
+ [CSR_SIREG5] = { "sireg5", csrind_smode, NULL, NULL, rmw_xiregi,
290
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
291
+ [CSR_SIREG6] = { "sireg6", csrind_smode, NULL, NULL, rmw_xiregi,
292
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
293
+
294
/* Supervisor-Level Interrupts (AIA) */
295
[CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
296
[CSR_STOPI] = { "stopi", aia_smode, read_stopi },
297
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
298
[CSR_VSIREG] = { "vsireg", csrind_or_aia_hmode, NULL, NULL,
299
rmw_xireg },
300
301
+ /* Virtual Supervisor Indirect Alias */
302
+ [CSR_VSIREG2] = { "vsireg2", csrind_hmode, NULL, NULL, rmw_xiregi,
303
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
304
+ [CSR_VSIREG3] = { "vsireg3", csrind_hmode, NULL, NULL, rmw_xiregi,
305
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
306
+ [CSR_VSIREG4] = { "vsireg4", csrind_hmode, NULL, NULL, rmw_xiregi,
307
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
308
+ [CSR_VSIREG5] = { "vsireg5", csrind_hmode, NULL, NULL, rmw_xiregi,
309
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
310
+ [CSR_VSIREG6] = { "vsireg6", csrind_hmode, NULL, NULL, rmw_xiregi,
311
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
312
+
313
/* VS-Level Interrupts (H-extension with AIA) */
314
[CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
315
[CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
316
--
317
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
This adds the properties for counter delegation ISA extensions
4
(Smcdeleg/Ssccfg). Definitions of new registers and and implementation
5
will come in the next set of patches.
6
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Atish Patra <atishp@rivosinc.com>
10
Message-ID: <20250110-counter_delegation-v5-5-e83d797ae294@rivosinc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/cpu_cfg.h | 2 ++
14
target/riscv/cpu.c | 2 ++
15
2 files changed, 4 insertions(+)
16
17
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu_cfg.h
20
+++ b/target/riscv/cpu_cfg.h
21
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
22
bool ext_ztso;
23
bool ext_smstateen;
24
bool ext_sstc;
25
+ bool ext_smcdeleg;
26
+ bool ext_ssccfg;
27
bool ext_smcntrpmf;
28
bool ext_smcsrind;
29
bool ext_sscsrind;
30
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
31
index XXXXXXX..XXXXXXX 100644
32
--- a/target/riscv/cpu.c
33
+++ b/target/riscv/cpu.c
34
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
35
ISA_EXT_DATA_ENTRY(shvstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
36
ISA_EXT_DATA_ENTRY(shvstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
37
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
38
+ ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
39
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
40
ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
41
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
42
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
43
ISA_EXT_DATA_ENTRY(smnpm, PRIV_VERSION_1_13_0, ext_smnpm),
44
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
45
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
46
+ ISA_EXT_DATA_ENTRY(ssccfg, PRIV_VERSION_1_13_0, ext_ssccfg),
47
ISA_EXT_DATA_ENTRY(ssccptr, PRIV_VERSION_1_11_0, has_priv_1_11),
48
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
49
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
50
--
51
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
2
1
3
This adds definitions for counter delegation, including the new
4
scountinhibit register and the mstateen.CD bit.
5
6
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Atish Patra <atishp@rivosinc.com>
9
Message-ID: <20250110-counter_delegation-v5-6-e83d797ae294@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu.h | 1 +
13
target/riscv/cpu_bits.h | 8 +++++++-
14
target/riscv/machine.c | 1 +
15
3 files changed, 9 insertions(+), 1 deletion(-)
16
17
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu.h
20
+++ b/target/riscv/cpu.h
21
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
22
uint32_t scounteren;
23
uint32_t mcounteren;
24
25
+ uint32_t scountinhibit;
26
uint32_t mcountinhibit;
27
28
/* PMU cycle & instret privilege mode filtering */
29
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu_bits.h
32
+++ b/target/riscv/cpu_bits.h
33
@@ -XXX,XX +XXX,XX @@
34
#define CSR_SSTATEEN2 0x10E
35
#define CSR_SSTATEEN3 0x10F
36
37
+/* Supervisor Counter Delegation */
38
+#define CSR_SCOUNTINHIBIT 0x120
39
+
40
/* Supervisor Trap Handling */
41
#define CSR_SSCRATCH 0x140
42
#define CSR_SEPC 0x141
43
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
44
#define MENVCFG_CBCFE BIT(6)
45
#define MENVCFG_CBZE BIT(7)
46
#define MENVCFG_PMM (3ULL << 32)
47
+#define MENVCFG_CDE (1ULL << 60)
48
#define MENVCFG_ADUE (1ULL << 61)
49
#define MENVCFG_PBMTE (1ULL << 62)
50
#define MENVCFG_STCE (1ULL << 63)
51
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
52
#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63
53
#define ISELECT_MASK_AIA 0x1ff
54
55
-/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
56
+/* [M|S|VS]SELCT value for Indirect CSR Access Extension */
57
+#define ISELECT_CD_FIRST 0x40
58
+#define ISELECT_CD_LAST 0x5f
59
#define ISELECT_MASK_SXCSRIND 0xfff
60
61
/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */
62
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/riscv/machine.c
65
+++ b/target/riscv/machine.c
66
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
67
VMSTATE_UINTTL(env.siselect, RISCVCPU),
68
VMSTATE_UINT32(env.scounteren, RISCVCPU),
69
VMSTATE_UINT32(env.mcounteren, RISCVCPU),
70
+ VMSTATE_UINT32(env.scountinhibit, RISCVCPU),
71
VMSTATE_UINT32(env.mcountinhibit, RISCVCPU),
72
VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
73
vmstate_pmu_ctr_state, PMUCTRState),
74
--
75
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Kaiwen Xue <kaiwenx@rivosinc.com>
2
1
3
This adds checks in ops performed on xireg and xireg2-xireg6 so that the
4
counter delegation function will receive a valid xiselect value with the
5
proper extensions enabled.
6
7
Co-developed-by: Atish Patra <atishp@rivosinc.com>
8
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Atish Patra <atishp@rivosinc.com>
11
Message-ID: <20250110-counter_delegation-v5-7-e83d797ae294@rivosinc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
---
14
target/riscv/csr.c | 36 +++++++++++++++++++++++++++++++++++-
15
1 file changed, 35 insertions(+), 1 deletion(-)
16
17
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/csr.c
20
+++ b/target/riscv/csr.c
21
@@ -XXX,XX +XXX,XX @@ static bool xiselect_aia_range(target_ulong isel)
22
(ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST);
23
}
24
25
+static bool xiselect_cd_range(target_ulong isel)
26
+{
27
+ return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
28
+}
29
+
30
static int rmw_iprio(target_ulong xlen,
31
target_ulong iselect, uint8_t *iprio,
32
target_ulong *val, target_ulong new_val,
33
@@ -XXX,XX +XXX,XX @@ done:
34
return RISCV_EXCP_NONE;
35
}
36
37
+static int rmw_xireg_cd(CPURISCVState *env, int csrno,
38
+ target_ulong isel, target_ulong *val,
39
+ target_ulong new_val, target_ulong wr_mask)
40
+{
41
+ if (!riscv_cpu_cfg(env)->ext_smcdeleg) {
42
+ return RISCV_EXCP_ILLEGAL_INST;
43
+ }
44
+ /* TODO: Implement the functionality later */
45
+ return RISCV_EXCP_NONE;
46
+}
47
+
48
/*
49
* rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
50
*
51
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
52
target_ulong isel, target_ulong *val,
53
target_ulong new_val, target_ulong wr_mask)
54
{
55
- return -EINVAL;
56
+ int ret = -EINVAL;
57
+ bool virt = csrno == CSR_VSIREG ? true : false;
58
+
59
+ if (xiselect_cd_range(isel)) {
60
+ ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
61
+ } else {
62
+ /*
63
+ * As per the specification, access to unimplented region is undefined
64
+ * but recommendation is to raise illegal instruction exception.
65
+ */
66
+ return RISCV_EXCP_ILLEGAL_INST;
67
+ }
68
+
69
+ if (ret) {
70
+ return (env->virt_enabled && virt) ?
71
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
72
+ }
73
+
74
+ return RISCV_EXCP_NONE;
75
}
76
77
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
78
--
79
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
The dependant ISA features are enabled at the end of cpu_realize
4
in finalize_features. Thus, PMU init should be invoked after that
5
only. Move the init invocation to riscv_tcg_cpu_finalize_features.
6
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Signed-off-by: Atish Patra <atishp@rivosinc.com>
9
Message-ID: <20250110-counter_delegation-v5-9-e83d797ae294@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/tcg/tcg-cpu.c | 28 ++++++++++++++--------------
13
1 file changed, 14 insertions(+), 14 deletions(-)
14
15
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/tcg/tcg-cpu.c
18
+++ b/target/riscv/tcg/tcg-cpu.c
19
@@ -XXX,XX +XXX,XX @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
20
error_propagate(errp, local_err);
21
return;
22
}
23
+#ifndef CONFIG_USER_ONLY
24
+ if (cpu->cfg.pmu_mask) {
25
+ riscv_pmu_init(cpu, &local_err);
26
+ if (local_err != NULL) {
27
+ error_propagate(errp, local_err);
28
+ return;
29
+ }
30
+
31
+ if (cpu->cfg.ext_sscofpmf) {
32
+ cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
33
+ riscv_pmu_timer_cb, cpu);
34
+ }
35
+ }
36
+#endif
37
}
38
39
void riscv_tcg_cpu_finalize_dynamic_decoder(RISCVCPU *cpu)
40
@@ -XXX,XX +XXX,XX @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
41
42
#ifndef CONFIG_USER_ONLY
43
CPURISCVState *env = &cpu->env;
44
- Error *local_err = NULL;
45
46
tcg_cflags_set(CPU(cs), CF_PCREL);
47
48
@@ -XXX,XX +XXX,XX @@ static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
49
riscv_timer_init(cpu);
50
}
51
52
- if (cpu->cfg.pmu_mask) {
53
- riscv_pmu_init(cpu, &local_err);
54
- if (local_err != NULL) {
55
- error_propagate(errp, local_err);
56
- return false;
57
- }
58
-
59
- if (cpu->cfg.ext_sscofpmf) {
60
- cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
61
- riscv_pmu_timer_cb, cpu);
62
- }
63
- }
64
-
65
/* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
66
if (riscv_has_ext(env, RVH)) {
67
env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
68
--
69
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
The counter delegation/configuration extensions depend on the following
4
extensions.
5
6
1. Smcdeleg - To enable counter delegation from M to S
7
2. S[m|s]csrind - To enable indirect access CSRs
8
9
Add an implied rule so that these extensions are enabled by default
10
if the sscfg extension is enabled.
11
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20250110-counter_delegation-v5-10-e83d797ae294@rivosinc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
target/riscv/cpu.c | 12 +++++++++++-
19
1 file changed, 11 insertions(+), 1 deletion(-)
20
21
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/cpu.c
24
+++ b/target/riscv/cpu.c
25
@@ -XXX,XX +XXX,XX @@ static RISCVCPUImpliedExtsRule ZVKSG_IMPLIED = {
26
},
27
};
28
29
+static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
30
+ .ext = CPU_CFG_OFFSET(ext_ssccfg),
31
+ .implied_multi_exts = {
32
+ CPU_CFG_OFFSET(ext_smcsrind), CPU_CFG_OFFSET(ext_sscsrind),
33
+ CPU_CFG_OFFSET(ext_smcdeleg),
34
+
35
+ RISCV_IMPLIED_EXTS_RULE_END
36
+ },
37
+};
38
+
39
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
40
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
41
&RVM_IMPLIED, &RVV_IMPLIED, NULL
42
@@ -XXX,XX +XXX,XX @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
43
&ZVE64X_IMPLIED, &ZVFBFMIN_IMPLIED, &ZVFBFWMA_IMPLIED,
44
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
45
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
46
- &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED,
47
+ &ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
48
NULL
49
};
50
51
--
52
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
Add configuration options so that they can be enabled/disabld from
4
qemu commandline.
5
6
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Signed-off-by: Atish Patra <atishp@rivosinc.com>
9
Message-ID: <20250110-counter_delegation-v5-11-e83d797ae294@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu.c | 4 ++++
13
1 file changed, 4 insertions(+)
14
15
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu.c
18
+++ b/target/riscv/cpu.c
19
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
20
/* Defaults for standard extensions */
21
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
22
MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
23
+ MULTI_EXT_CFG_BOOL("smcsrind", ext_smcsrind, false),
24
+ MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
25
+ MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
26
+ MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
27
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
28
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
29
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
30
--
31
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
With the current implementation, if we had the following scenario:
4
- Set bit x in menvcfg
5
- Set bit x in henvcfg
6
- Clear bit x in menvcfg
7
then, the internal variable env->henvcfg would still contain bit x due
8
to both a wrong menvcfg mask used in write_henvcfg() as well as a
9
missing update of henvcfg upon menvcfg update.
10
This can lead to some wrong interpretation of the context. In order to
11
update henvcfg upon menvcfg writing, call write_henvcfg() after writing
12
menvcfg. Clearing henvcfg upon writing the new value is also needed in
13
write_henvcfg() as well as clearing henvcfg upper part when writing it
14
with write_henvcfgh().
15
16
Signed-off-by: Clément Léger <cleger@rivosinc.com>
17
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
18
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20250110125441.3208676-2-cleger@rivosinc.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
22
target/riscv/csr.c | 10 ++++++++--
23
1 file changed, 8 insertions(+), 2 deletions(-)
24
25
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
26
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/csr.c
28
+++ b/target/riscv/csr.c
29
@@ -XXX,XX +XXX,XX @@ static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
30
return RISCV_EXCP_NONE;
31
}
32
33
+static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
34
+ target_ulong val);
35
static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
36
target_ulong val)
37
{
38
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
39
}
40
}
41
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
42
+ write_henvcfg(env, CSR_HENVCFG, env->henvcfg);
43
44
return RISCV_EXCP_NONE;
45
}
46
@@ -XXX,XX +XXX,XX @@ static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
47
return RISCV_EXCP_NONE;
48
}
49
50
+static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
51
+ target_ulong val);
52
static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
53
target_ulong val)
54
{
55
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
56
uint64_t valh = (uint64_t)val << 32;
57
58
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
59
+ write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
60
61
return RISCV_EXCP_NONE;
62
}
63
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
64
}
65
}
66
67
- env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
68
+ env->henvcfg = val & mask;
69
70
return RISCV_EXCP_NONE;
71
}
72
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
73
return ret;
74
}
75
76
- env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
77
+ env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
78
return RISCV_EXCP_NONE;
79
}
80
81
--
82
2.47.1
83
84
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
Add ext_ssdbltrp in RISCVCPUConfig and implement MSTATUS.SDT,
4
{H|M}ENVCFG.DTE and modify the availability of MTVAL2 based on the
5
presence of the Ssdbltrp ISA extension.
6
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250110125441.3208676-3-cleger@rivosinc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/cpu.h | 1 +
14
target/riscv/cpu_bits.h | 6 ++++
15
target/riscv/cpu_cfg.h | 1 +
16
target/riscv/cpu_helper.c | 17 ++++++++++
17
target/riscv/csr.c | 71 ++++++++++++++++++++++++++++++++-------
18
5 files changed, 84 insertions(+), 12 deletions(-)
19
20
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
21
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/cpu.h
23
+++ b/target/riscv/cpu.h
24
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
25
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch);
26
bool cpu_get_fcfien(CPURISCVState *env);
27
bool cpu_get_bcfien(CPURISCVState *env);
28
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt);
29
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
30
MMUAccessType access_type,
31
int mmu_idx, uintptr_t retaddr);
32
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_bits.h
35
+++ b/target/riscv/cpu_bits.h
36
@@ -XXX,XX +XXX,XX @@
37
#define MSTATUS_TW 0x00200000 /* since: priv-1.10 */
38
#define MSTATUS_TSR 0x00400000 /* since: priv-1.10 */
39
#define MSTATUS_SPELP 0x00800000 /* zicfilp */
40
+#define MSTATUS_SDT 0x01000000
41
#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
42
#define MSTATUS_GVA 0x4000000000ULL
43
#define MSTATUS_MPV 0x8000000000ULL
44
@@ -XXX,XX +XXX,XX @@ typedef enum {
45
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
46
#define SSTATUS_MXR 0x00080000
47
#define SSTATUS_SPELP MSTATUS_SPELP /* zicfilp */
48
+#define SSTATUS_SDT MSTATUS_SDT
49
50
#define SSTATUS64_UXL 0x0000000300000000ULL
51
52
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
53
#define MENVCFG_CBCFE BIT(6)
54
#define MENVCFG_CBZE BIT(7)
55
#define MENVCFG_PMM (3ULL << 32)
56
+#define MENVCFG_DTE (1ULL << 59)
57
#define MENVCFG_CDE (1ULL << 60)
58
#define MENVCFG_ADUE (1ULL << 61)
59
#define MENVCFG_PBMTE (1ULL << 62)
60
#define MENVCFG_STCE (1ULL << 63)
61
62
/* For RV32 */
63
+#define MENVCFGH_DTE BIT(27)
64
#define MENVCFGH_ADUE BIT(29)
65
#define MENVCFGH_PBMTE BIT(30)
66
#define MENVCFGH_STCE BIT(31)
67
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
68
#define HENVCFG_CBCFE MENVCFG_CBCFE
69
#define HENVCFG_CBZE MENVCFG_CBZE
70
#define HENVCFG_PMM MENVCFG_PMM
71
+#define HENVCFG_DTE MENVCFG_DTE
72
#define HENVCFG_ADUE MENVCFG_ADUE
73
#define HENVCFG_PBMTE MENVCFG_PBMTE
74
#define HENVCFG_STCE MENVCFG_STCE
75
76
/* For RV32 */
77
+#define HENVCFGH_DTE MENVCFGH_DTE
78
#define HENVCFGH_ADUE MENVCFGH_ADUE
79
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
80
#define HENVCFGH_STCE MENVCFGH_STCE
81
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
82
index XXXXXXX..XXXXXXX 100644
83
--- a/target/riscv/cpu_cfg.h
84
+++ b/target/riscv/cpu_cfg.h
85
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
86
bool ext_smcntrpmf;
87
bool ext_smcsrind;
88
bool ext_sscsrind;
89
+ bool ext_ssdbltrp;
90
bool ext_svadu;
91
bool ext_svinval;
92
bool ext_svnapot;
93
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
94
index XXXXXXX..XXXXXXX 100644
95
--- a/target/riscv/cpu_helper.c
96
+++ b/target/riscv/cpu_helper.c
97
@@ -XXX,XX +XXX,XX @@ bool cpu_get_bcfien(CPURISCVState *env)
98
}
99
}
100
101
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
102
+{
103
+#ifdef CONFIG_USER_ONLY
104
+ return false;
105
+#else
106
+ if (virt) {
107
+ return (env->henvcfg & HENVCFG_DTE) != 0;
108
+ } else {
109
+ return (env->menvcfg & MENVCFG_DTE) != 0;
110
+ }
111
+#endif
112
+}
113
+
114
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
115
uint64_t *cs_base, uint32_t *pflags)
116
{
117
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
118
119
g_assert(riscv_has_ext(env, RVH));
120
121
+ if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
122
+ mstatus_mask |= MSTATUS_SDT;
123
+ }
124
+
125
if (current_virt) {
126
/* Current V=1 and we are about to change to V=0 */
127
env->vsstatus = env->mstatus & mstatus_mask;
128
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
129
index XXXXXXX..XXXXXXX 100644
130
--- a/target/riscv/csr.c
131
+++ b/target/riscv/csr.c
132
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_hmode32(CPURISCVState *env, int csrno)
133
return hmode32(env, csrno);
134
}
135
136
+static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
137
+{
138
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
139
+ return RISCV_EXCP_NONE;
140
+ }
141
+
142
+ return hmode(env, csrno);
143
+}
144
+
145
static RISCVException pmp(CPURISCVState *env, int csrno)
146
{
147
if (riscv_cpu_cfg(env)->pmp) {
148
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
149
mask |= MSTATUS_VS;
150
}
151
152
+ if (riscv_env_smode_dbltrp_enabled(env, env->virt_enabled)) {
153
+ mask |= MSTATUS_SDT;
154
+ if ((val & MSTATUS_SDT) != 0) {
155
+ val &= ~MSTATUS_SIE;
156
+ }
157
+ }
158
+
159
if (xl != MXL_RV32 || env->debugger) {
160
if (riscv_has_ext(env, RVH)) {
161
mask |= MSTATUS_MPV | MSTATUS_GVA;
162
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
163
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
164
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
165
(cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
166
- (cfg->ext_svadu ? MENVCFG_ADUE : 0);
167
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0) |
168
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
169
170
if (env_archcpu(env)->cfg.ext_zicfilp) {
171
mask |= MENVCFG_LPE;
172
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
173
if (env_archcpu(env)->cfg.ext_smnpm &&
174
get_field(val, MENVCFG_PMM) != PMM_FIELD_RESERVED) {
175
mask |= MENVCFG_PMM;
176
+    }
177
+
178
+ if ((val & MENVCFG_DTE) == 0) {
179
+ env->mstatus &= ~MSTATUS_SDT;
180
}
181
}
182
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
183
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
184
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
185
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
186
(cfg->ext_svadu ? MENVCFG_ADUE : 0) |
187
- (cfg->ext_smcdeleg ? MENVCFG_CDE : 0);
188
+ (cfg->ext_smcdeleg ? MENVCFG_CDE : 0) |
189
+ (cfg->ext_ssdbltrp ? MENVCFG_DTE : 0);
190
uint64_t valh = (uint64_t)val << 32;
191
192
+ if ((valh & MENVCFG_DTE) == 0) {
193
+ env->mstatus &= ~MSTATUS_SDT;
194
+ }
195
+
196
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
197
write_henvcfgh(env, CSR_HENVCFGH, env->henvcfg >> 32);
198
199
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
200
* henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
201
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
202
* henvcfg.adue is read_only 0 when menvcfg.adue = 0
203
+ * henvcfg.dte is read_only 0 when menvcfg.dte = 0
204
*/
205
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
206
- env->menvcfg);
207
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
208
+ HENVCFG_DTE) | env->menvcfg);
209
return RISCV_EXCP_NONE;
210
}
211
212
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
213
}
214
215
if (riscv_cpu_mxl(env) == MXL_RV64) {
216
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
217
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
218
+ HENVCFG_DTE);
219
220
if (env_archcpu(env)->cfg.ext_zicfilp) {
221
mask |= HENVCFG_LPE;
222
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
223
}
224
225
env->henvcfg = val & mask;
226
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
227
+ env->vsstatus &= ~MSTATUS_SDT;
228
+ }
229
230
return RISCV_EXCP_NONE;
231
}
232
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
233
return ret;
234
}
235
236
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
237
- env->menvcfg)) >> 32;
238
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE |
239
+ HENVCFG_DTE) | env->menvcfg)) >> 32;
240
return RISCV_EXCP_NONE;
241
}
242
243
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
244
target_ulong val)
245
{
246
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
247
- HENVCFG_ADUE);
248
+ HENVCFG_ADUE | HENVCFG_DTE);
249
uint64_t valh = (uint64_t)val << 32;
250
RISCVException ret;
251
252
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
253
if (ret != RISCV_EXCP_NONE) {
254
return ret;
255
}
256
-
257
env->henvcfg = (env->henvcfg & 0xFFFFFFFF) | (valh & mask);
258
+ if ((env->henvcfg & HENVCFG_DTE) == 0) {
259
+ env->vsstatus &= ~MSTATUS_SDT;
260
+ }
261
return RISCV_EXCP_NONE;
262
}
263
264
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
265
if (env->xl != MXL_RV32 || env->debugger) {
266
mask |= SSTATUS64_UXL;
267
}
268
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
269
+ mask |= SSTATUS_SDT;
270
+ }
271
272
if (env_archcpu(env)->cfg.ext_zicfilp) {
273
mask |= SSTATUS_SPELP;
274
@@ -XXX,XX +XXX,XX @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno,
275
if (env_archcpu(env)->cfg.ext_zicfilp) {
276
mask |= SSTATUS_SPELP;
277
}
278
-
279
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
280
+ mask |= SSTATUS_SDT;
281
+ }
282
/* TODO: Use SXL not MXL. */
283
*val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
284
return RISCV_EXCP_NONE;
285
@@ -XXX,XX +XXX,XX @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno,
286
if (env_archcpu(env)->cfg.ext_zicfilp) {
287
mask |= SSTATUS_SPELP;
288
}
289
-
290
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
291
+ mask |= SSTATUS_SDT;
292
+ }
293
target_ulong newval = (env->mstatus & ~mask) | (val & mask);
294
return write_mstatus(env, CSR_MSTATUS, newval);
295
}
296
@@ -XXX,XX +XXX,XX @@ static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
297
if ((val & VSSTATUS64_UXL) == 0) {
298
mask &= ~VSSTATUS64_UXL;
299
}
300
+ if ((env->henvcfg & HENVCFG_DTE)) {
301
+ if ((val & SSTATUS_SDT) != 0) {
302
+ val &= ~SSTATUS_SIE;
303
+ }
304
+ } else {
305
+ val &= ~SSTATUS_SDT;
306
+ }
307
env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
308
return RISCV_EXCP_NONE;
309
}
310
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
311
[CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
312
.min_priv_ver = PRIV_VERSION_1_12_0 },
313
314
- [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
315
+ [CSR_MTVAL2] = { "mtval2", dbltrp_hmode, read_mtval2, write_mtval2,
316
.min_priv_ver = PRIV_VERSION_1_12_0 },
317
[CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
318
.min_priv_ver = PRIV_VERSION_1_12_0 },
319
--
320
2.47.1
321
322
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
When the Ssdbltrp extension is enabled, SSTATUS.SDT field is cleared
4
when executing sret. When executing mret/mnret, SSTATUS.SDT is cleared
5
when returning to U, VS or VU and VSSTATUS.SDT is cleared when returning
6
to VU from HS.
7
8
Signed-off-by: Clément Léger <cleger@rivosinc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250110125441.3208676-4-cleger@rivosinc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
target/riscv/op_helper.c | 35 ++++++++++++++++++++++++++++++++++-
14
1 file changed, 34 insertions(+), 1 deletion(-)
15
16
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/op_helper.c
19
+++ b/target/riscv/op_helper.c
20
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
21
get_field(mstatus, MSTATUS_SPIE));
22
mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
23
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
24
+
25
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
26
+ if (riscv_has_ext(env, RVH)) {
27
+ target_ulong prev_vu = get_field(env->hstatus, HSTATUS_SPV) &&
28
+ prev_priv == PRV_U;
29
+ /* Returning to VU from HS, vsstatus.sdt = 0 */
30
+ if (!env->virt_enabled && prev_vu) {
31
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
32
+ }
33
+ }
34
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
35
+ }
36
if (env->priv_ver >= PRIV_VERSION_1_12_0) {
37
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
38
}
39
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
40
target_ulong hstatus = env->hstatus;
41
42
prev_virt = get_field(hstatus, HSTATUS_SPV);
43
-
44
hstatus = set_field(hstatus, HSTATUS_SPV, 0);
45
46
env->hstatus = hstatus;
47
@@ -XXX,XX +XXX,XX @@ static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc,
48
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
49
}
50
}
51
+static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus,
52
+ target_ulong prev_priv,
53
+ target_ulong prev_virt)
54
+{
55
+ /* If returning to U, VS or VU, sstatus.sdt = 0 */
56
+ if (prev_priv == PRV_U || (prev_virt &&
57
+ (prev_priv == PRV_S || prev_priv == PRV_U))) {
58
+ mstatus = set_field(mstatus, MSTATUS_SDT, 0);
59
+ /* If returning to VU, vsstatus.sdt = 0 */
60
+ if (prev_virt && prev_priv == PRV_U) {
61
+ env->vsstatus = set_field(env->vsstatus, MSTATUS_SDT, 0);
62
+ }
63
+ }
64
+
65
+ return mstatus;
66
+}
67
68
target_ulong helper_mret(CPURISCVState *env)
69
{
70
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mret(CPURISCVState *env)
71
mstatus = set_field(mstatus, MSTATUS_MPP,
72
riscv_has_ext(env, RVU) ? PRV_U : PRV_M);
73
mstatus = set_field(mstatus, MSTATUS_MPV, 0);
74
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
75
+ mstatus = ssdbltrp_mxret(env, mstatus, prev_priv, prev_virt);
76
+ }
77
if ((env->priv_ver >= PRIV_VERSION_1_12_0) && (prev_priv != PRV_M)) {
78
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
79
}
80
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mnret(CPURISCVState *env)
81
if (prev_priv < PRV_M) {
82
env->mstatus = set_field(env->mstatus, MSTATUS_MPRV, false);
83
}
84
+ if (riscv_cpu_cfg(env)->ext_ssdbltrp) {
85
+ env->mstatus = ssdbltrp_mxret(env, env->mstatus, prev_priv, prev_virt);
86
+ }
87
88
if (riscv_has_ext(env, RVH) && prev_virt) {
89
riscv_cpu_swap_hypervisor_regs(env);
90
--
91
2.47.1
92
93
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
When the Ssdbltrp ISA extension is enabled, if a trap happens in S-mode
4
while SSTATUS.SDT isn't cleared, generate a double trap exception to
5
M-mode.
6
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250110125441.3208676-5-cleger@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu_bits.h | 1 +
13
target/riscv/cpu.c | 2 +-
14
target/riscv/cpu_helper.c | 42 ++++++++++++++++++++++++++++++++++-----
15
3 files changed, 39 insertions(+), 6 deletions(-)
16
17
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu_bits.h
20
+++ b/target/riscv/cpu_bits.h
21
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
22
RISCV_EXCP_INST_PAGE_FAULT = 0xc, /* since: priv-1.10.0 */
23
RISCV_EXCP_LOAD_PAGE_FAULT = 0xd, /* since: priv-1.10.0 */
24
RISCV_EXCP_STORE_PAGE_FAULT = 0xf, /* since: priv-1.10.0 */
25
+ RISCV_EXCP_DOUBLE_TRAP = 0x10,
26
RISCV_EXCP_SW_CHECK = 0x12, /* since: priv-1.13.0 */
27
RISCV_EXCP_HW_ERR = 0x13, /* since: priv-1.13.0 */
28
RISCV_EXCP_INST_GUEST_PAGE_FAULT = 0x14,
29
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu.c
32
+++ b/target/riscv/cpu.c
33
@@ -XXX,XX +XXX,XX @@ static const char * const riscv_excp_names[] = {
34
"load_page_fault",
35
"reserved",
36
"store_page_fault",
37
- "reserved",
38
+ "double_trap",
39
"reserved",
40
"reserved",
41
"reserved",
42
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
43
index XXXXXXX..XXXXXXX 100644
44
--- a/target/riscv/cpu_helper.c
45
+++ b/target/riscv/cpu_helper.c
46
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
47
bool virt = env->virt_enabled;
48
bool write_gva = false;
49
bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
50
+ bool vsmode_exc;
51
uint64_t s;
52
int mode;
53
54
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
55
!(env->mip & (1ULL << cause));
56
bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
57
!(env->mip & (1ULL << cause));
58
+ bool smode_double_trap = false;
59
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
60
target_ulong tval = 0;
61
target_ulong tinst = 0;
62
target_ulong htval = 0;
63
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
64
mode = env->priv <= PRV_S && cause < 64 &&
65
(((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
66
67
+ vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
68
+ /*
69
+ * Check double trap condition only if already in S-mode and targeting
70
+ * S-mode
71
+ */
72
+ if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
73
+ bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
74
+ bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
75
+ /* In VS or HS */
76
+ if (riscv_has_ext(env, RVH)) {
77
+ if (vsmode_exc) {
78
+ /* VS -> VS, use henvcfg instead of menvcfg*/
79
+ dte = (env->henvcfg & HENVCFG_DTE) != 0;
80
+ } else if (env->virt_enabled) {
81
+ /* VS -> HS, use mstatus_hs */
82
+ sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
83
+ }
84
+ }
85
+ smode_double_trap = dte && sdt;
86
+ if (smode_double_trap) {
87
+ mode = PRV_M;
88
+ }
89
+ }
90
+
91
if (mode == PRV_S) {
92
/* handle the trap in S-mode */
93
/* save elp status */
94
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
95
}
96
97
if (riscv_has_ext(env, RVH)) {
98
- uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
99
-
100
- if (env->virt_enabled &&
101
- (((hdeleg >> cause) & 1) || vs_injected)) {
102
+ if (vsmode_exc) {
103
/* Trap to VS mode */
104
/*
105
* See if we need to adjust cause. Yes if its VS mode interrupt
106
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
107
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
108
s = set_field(s, MSTATUS_SPP, env->priv);
109
s = set_field(s, MSTATUS_SIE, 0);
110
+ if (riscv_env_smode_dbltrp_enabled(env, virt)) {
111
+ s = set_field(s, MSTATUS_SDT, 1);
112
+ }
113
env->mstatus = s;
114
sxlen = 16 << riscv_cpu_sxl(env);
115
env->scause = cause | ((target_ulong)async << (sxlen - 1));
116
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
117
s = set_field(s, MSTATUS_MIE, 0);
118
env->mstatus = s;
119
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
120
+ if (smode_double_trap) {
121
+ env->mtval2 = env->mcause;
122
+ env->mcause = RISCV_EXCP_DOUBLE_TRAP;
123
+ } else {
124
+ env->mtval2 = mtval2;
125
+ }
126
env->mepc = env->pc;
127
env->mtval = tval;
128
- env->mtval2 = mtval2;
129
env->mtinst = tinst;
130
131
/*
132
--
133
2.47.1
134
135
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
Add the switch to enable the Ssdbltrp ISA extension.
4
5
Signed-off-by: Clément Léger <cleger@rivosinc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250110125441.3208676-6-cleger@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/cpu.c | 2 ++
11
1 file changed, 2 insertions(+)
12
13
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/cpu.c
16
+++ b/target/riscv/cpu.c
17
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
18
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
19
ISA_EXT_DATA_ENTRY(sscounterenw, PRIV_VERSION_1_12_0, has_priv_1_12),
20
ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
21
+ ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
22
ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
23
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
24
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
25
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
26
MULTI_EXT_CFG_BOOL("smnpm", ext_smnpm, false),
27
MULTI_EXT_CFG_BOOL("smstateen", ext_smstateen, false),
28
MULTI_EXT_CFG_BOOL("ssaia", ext_ssaia, false),
29
+ MULTI_EXT_CFG_BOOL("ssdbltrp", ext_ssdbltrp, false),
30
MULTI_EXT_CFG_BOOL("svade", ext_svade, false),
31
MULTI_EXT_CFG_BOOL("svadu", ext_svadu, true),
32
MULTI_EXT_CFG_BOOL("svinval", ext_svinval, false),
33
--
34
2.47.1
35
36
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
Add `ext_smdbltrp`in RISCVCPUConfig and implement MSTATUS.MDT behavior.
4
Also set MDT to 1 at reset according to the specification.
5
6
Signed-off-by: Clément Léger <cleger@rivosinc.com>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-ID: <20250110125441.3208676-7-cleger@rivosinc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
target/riscv/cpu_bits.h | 1 +
12
target/riscv/cpu_cfg.h | 1 +
13
target/riscv/cpu.c | 3 +++
14
target/riscv/csr.c | 13 +++++++++++++
15
4 files changed, 18 insertions(+)
16
17
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
18
index XXXXXXX..XXXXXXX 100644
19
--- a/target/riscv/cpu_bits.h
20
+++ b/target/riscv/cpu_bits.h
21
@@ -XXX,XX +XXX,XX @@
22
#define MSTATUS_MPELP 0x020000000000 /* zicfilp */
23
#define MSTATUS_GVA 0x4000000000ULL
24
#define MSTATUS_MPV 0x8000000000ULL
25
+#define MSTATUS_MDT 0x40000000000ULL /* Smdbltrp extension */
26
27
#define MSTATUS64_UXL 0x0000000300000000ULL
28
#define MSTATUS64_SXL 0x0000000C00000000ULL
29
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu_cfg.h
32
+++ b/target/riscv/cpu_cfg.h
33
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
34
bool ext_smcsrind;
35
bool ext_sscsrind;
36
bool ext_ssdbltrp;
37
+ bool ext_smdbltrp;
38
bool ext_svadu;
39
bool ext_svinval;
40
bool ext_svnapot;
41
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
42
index XXXXXXX..XXXXXXX 100644
43
--- a/target/riscv/cpu.c
44
+++ b/target/riscv/cpu.c
45
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj, ResetType type)
46
env->mstatus_hs = set_field(env->mstatus_hs,
47
MSTATUS64_UXL, env->misa_mxl);
48
}
49
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
50
+ env->mstatus = set_field(env->mstatus, MSTATUS_MDT, 1);
51
+ }
52
}
53
env->mcause = 0;
54
env->miclaim = MIP_SGEIP;
55
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/csr.c
58
+++ b/target/riscv/csr.c
59
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
60
}
61
}
62
63
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
64
+ mask |= MSTATUS_MDT;
65
+ if ((val & MSTATUS_MDT) != 0) {
66
+ val &= ~MSTATUS_MIE;
67
+ }
68
+ }
69
+
70
if (xl != MXL_RV32 || env->debugger) {
71
if (riscv_has_ext(env, RVH)) {
72
mask |= MSTATUS_MPV | MSTATUS_GVA;
73
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno,
74
uint64_t valh = (uint64_t)val << 32;
75
uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
76
77
+ if (riscv_cpu_cfg(env)->ext_smdbltrp) {
78
+ mask |= MSTATUS_MDT;
79
+ if ((valh & MSTATUS_MDT) != 0) {
80
+ mask |= MSTATUS_MIE;
81
+ }
82
+ }
83
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
84
85
return RISCV_EXCP_NONE;
86
--
87
2.47.1
88
89
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
When the Smdbltrp ISA extension is enabled, if a trap happens while
4
MSTATUS.MDT is already set, it will trigger an abort or an NMI if the
5
Smrnmi extension is available.
6
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Message-ID: <20250110125441.3208676-9-cleger@rivosinc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu_helper.c | 57 ++++++++++++++++++++++++++++-----------
13
1 file changed, 41 insertions(+), 16 deletions(-)
14
15
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
16
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/cpu_helper.c
18
+++ b/target/riscv/cpu_helper.c
19
@@ -XXX,XX +XXX,XX @@ static target_ulong promote_load_fault(target_ulong orig_cause)
20
/* if no promotion, return original cause */
21
return orig_cause;
22
}
23
+
24
+static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
25
+{
26
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
27
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
28
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
29
+ env->mncause = cause;
30
+ env->mnepc = env->pc;
31
+ env->pc = env->rnmi_irqvec;
32
+
33
+ if (cpu_get_fcfien(env)) {
34
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
35
+ }
36
+
37
+ /* Trapping to M mode, virt is disabled */
38
+ riscv_cpu_set_mode(env, PRV_M, false);
39
+}
40
+
41
/*
42
* Handle Traps
43
*
44
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
45
bool nnmi_excep = false;
46
47
if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
48
- env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
49
- env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV,
50
- env->virt_enabled);
51
- env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP,
52
- env->priv);
53
- env->mncause = cause | ((target_ulong)1U << (mxlen - 1));
54
- env->mnepc = env->pc;
55
- env->pc = env->rnmi_irqvec;
56
-
57
- if (cpu_get_fcfien(env)) {
58
- env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
59
- }
60
-
61
- /* Trapping to M mode, virt is disabled */
62
- riscv_cpu_set_mode(env, PRV_M, false);
63
-
64
+ riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
65
+ env->virt_enabled);
66
return;
67
}
68
69
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
70
/* Trapping to M mode, virt is disabled */
71
virt = false;
72
}
73
+ /*
74
+ * If the hart encounters an exception while executing in M-mode,
75
+ * with the mnstatus.NMIE bit clear, the program counter is set to
76
+ * the RNMI exception trap handler address.
77
+ */
78
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
79
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
80
+ !async;
81
82
s = env->mstatus;
83
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
84
s = set_field(s, MSTATUS_MPP, env->priv);
85
s = set_field(s, MSTATUS_MIE, 0);
86
+ if (cpu->cfg.ext_smdbltrp) {
87
+ if (env->mstatus & MSTATUS_MDT) {
88
+ assert(env->priv == PRV_M);
89
+ if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
90
+ cpu_abort(CPU(cpu), "M-mode double trap\n");
91
+ } else {
92
+ riscv_do_nmi(env, cause, false);
93
+ return;
94
+ }
95
+ }
96
+
97
+ s = set_field(s, MSTATUS_MDT, 1);
98
+ }
99
env->mstatus = s;
100
env->mcause = cause | ((target_ulong)async << (mxlen - 1));
101
if (smode_double_trap) {
102
--
103
2.47.1
104
105
diff view generated by jsdifflib
Deleted patch
1
From: Clément Léger <cleger@rivosinc.com>
2
1
3
Add the switch to enable the Smdbltrp ISA extension.
4
5
Signed-off-by: Clément Léger <cleger@rivosinc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250110125441.3208676-10-cleger@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/cpu.c | 2 ++
11
1 file changed, 2 insertions(+)
12
13
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/cpu.c
16
+++ b/target/riscv/cpu.c
17
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
18
ISA_EXT_DATA_ENTRY(smcdeleg, PRIV_VERSION_1_13_0, ext_smcdeleg),
19
ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
20
ISA_EXT_DATA_ENTRY(smcsrind, PRIV_VERSION_1_13_0, ext_smcsrind),
21
+ ISA_EXT_DATA_ENTRY(smdbltrp, PRIV_VERSION_1_13_0, ext_smdbltrp),
22
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
23
ISA_EXT_DATA_ENTRY(smrnmi, PRIV_VERSION_1_12_0, ext_smrnmi),
24
ISA_EXT_DATA_ENTRY(smmpm, PRIV_VERSION_1_13_0, ext_smmpm),
25
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
26
MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
27
28
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
29
+ MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
30
MULTI_EXT_CFG_BOOL("smepmp", ext_smepmp, false),
31
MULTI_EXT_CFG_BOOL("smrnmi", ext_smrnmi, false),
32
MULTI_EXT_CFG_BOOL("smmpm", ext_smmpm, false),
33
--
34
2.47.1
35
36
diff view generated by jsdifflib
Deleted patch
1
From: Alexey Baturo <baturo.alexey@gmail.com>
2
1
3
The Zjpm v1.0 spec states there should be Supm and Sspm extensions that
4
are used in profile specification. Enabling Supm extension enables both
5
Ssnpm and Smnpm, while Sspm enables only Smnpm.
6
7
Signed-off-by: Alexey Baturo <baturo.alexey@gmail.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Message-ID: <20250113194410.1307494-1-baturo.alexey@gmail.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
target/riscv/cpu_cfg.h | 2 ++
13
target/riscv/cpu.c | 23 +++++++++++++++++++++++
14
2 files changed, 25 insertions(+)
15
16
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu_cfg.h
19
+++ b/target/riscv/cpu_cfg.h
20
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
21
bool ext_ssnpm;
22
bool ext_smnpm;
23
bool ext_smmpm;
24
+ bool ext_sspm;
25
+ bool ext_supm;
26
bool rvv_ta_all_1s;
27
bool rvv_ma_all_1s;
28
bool rvv_vl_half_avl;
29
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu.c
32
+++ b/target/riscv/cpu.c
33
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
34
ISA_EXT_DATA_ENTRY(sscsrind, PRIV_VERSION_1_12_0, ext_sscsrind),
35
ISA_EXT_DATA_ENTRY(ssdbltrp, PRIV_VERSION_1_13_0, ext_ssdbltrp),
36
ISA_EXT_DATA_ENTRY(ssnpm, PRIV_VERSION_1_13_0, ext_ssnpm),
37
+ ISA_EXT_DATA_ENTRY(sspm, PRIV_VERSION_1_13_0, ext_sspm),
38
ISA_EXT_DATA_ENTRY(ssstateen, PRIV_VERSION_1_12_0, ext_ssstateen),
39
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
40
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
41
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
42
+ ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
43
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
44
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
45
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
46
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
47
MULTI_EXT_CFG_BOOL("zvfhmin", ext_zvfhmin, false),
48
MULTI_EXT_CFG_BOOL("sstc", ext_sstc, true),
49
MULTI_EXT_CFG_BOOL("ssnpm", ext_ssnpm, false),
50
+ MULTI_EXT_CFG_BOOL("sspm", ext_sspm, false),
51
+ MULTI_EXT_CFG_BOOL("supm", ext_supm, false),
52
53
MULTI_EXT_CFG_BOOL("smaia", ext_smaia, false),
54
MULTI_EXT_CFG_BOOL("smdbltrp", ext_smdbltrp, false),
55
@@ -XXX,XX +XXX,XX @@ static RISCVCPUImpliedExtsRule SSCFG_IMPLIED = {
56
},
57
};
58
59
+static RISCVCPUImpliedExtsRule SUPM_IMPLIED = {
60
+ .ext = CPU_CFG_OFFSET(ext_supm),
61
+ .implied_multi_exts = {
62
+ CPU_CFG_OFFSET(ext_ssnpm), CPU_CFG_OFFSET(ext_smnpm),
63
+
64
+ RISCV_IMPLIED_EXTS_RULE_END
65
+ },
66
+};
67
+
68
+static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
69
+ .ext = CPU_CFG_OFFSET(ext_sspm),
70
+ .implied_multi_exts = {
71
+ CPU_CFG_OFFSET(ext_smnpm),
72
+
73
+ RISCV_IMPLIED_EXTS_RULE_END
74
+ },
75
+};
76
+
77
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
78
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
79
&RVM_IMPLIED, &RVV_IMPLIED, NULL
80
@@ -XXX,XX +XXX,XX @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
81
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
82
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
83
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
84
+ &SUPM_IMPLIED, &SSPM_IMPLIED,
85
NULL
86
};
87
88
--
89
2.47.1
diff view generated by jsdifflib
Deleted patch
1
From: Philippe Mathieu-Daudé <philmd@linaro.org>
2
1
3
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
5
Message-ID: <20250116223609.81594-1-philmd@linaro.org>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
8
hw/char/riscv_htif.c | 15 +++------------
9
hw/char/trace-events | 4 ++++
10
2 files changed, 7 insertions(+), 12 deletions(-)
11
12
diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
13
index XXXXXXX..XXXXXXX 100644
14
--- a/hw/char/riscv_htif.c
15
+++ b/hw/char/riscv_htif.c
16
@@ -XXX,XX +XXX,XX @@
17
#include "exec/tswap.h"
18
#include "system/dma.h"
19
#include "system/runstate.h"
20
-
21
-#define RISCV_DEBUG_HTIF 0
22
-#define HTIF_DEBUG(fmt, ...) \
23
- do { \
24
- if (RISCV_DEBUG_HTIF) { \
25
- qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
26
- } \
27
- } while (0)
28
+#include "trace.h"
29
30
#define HTIF_DEV_SHIFT 56
31
#define HTIF_CMD_SHIFT 48
32
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
33
uint64_t payload = val_written & 0xFFFFFFFFFFFFULL;
34
int resp = 0;
35
36
- HTIF_DEBUG("mtohost write: device: %d cmd: %d what: %02" PRIx64
37
- " -payload: %016" PRIx64 "\n", device, cmd, payload & 0xFF, payload);
38
+ trace_htif_uart_write_to_host(device, cmd, payload);
39
40
/*
41
* Currently, there is a fixed mapping of devices:
42
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
43
}
44
} else {
45
qemu_log("HTIF unknown device or command\n");
46
- HTIF_DEBUG("device: %d cmd: %d what: %02" PRIx64
47
- " payload: %016" PRIx64, device, cmd, payload & 0xFF, payload);
48
+ trace_htif_uart_unknown_device_command(device, cmd, payload);
49
}
50
/*
51
* Latest bbl does not set fromhost to 0 if there is a value in tohost.
52
diff --git a/hw/char/trace-events b/hw/char/trace-events
53
index XXXXXXX..XXXXXXX 100644
54
--- a/hw/char/trace-events
55
+++ b/hw/char/trace-events
56
@@ -XXX,XX +XXX,XX @@ stm32f2xx_usart_read(char *id, unsigned size, uint64_t ofs, uint64_t val) " %s s
57
stm32f2xx_usart_write(char *id, unsigned size, uint64_t ofs, uint64_t val) "%s size %d ofs 0x%02" PRIx64 " <- 0x%02" PRIx64
58
stm32f2xx_usart_drop(char *id) " %s dropping the chars"
59
stm32f2xx_usart_receive(char *id, uint8_t chr) " %s receiving '%c'"
60
+
61
+# riscv_htif.c
62
+htif_uart_write_to_host(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64
63
+htif_uart_unknown_device_command(uint8_t device, uint8_t cmd, uint64_t payload) "device: %u cmd: %02u payload: %016" PRIx64
64
--
65
2.47.1
66
67
diff view generated by jsdifflib