1
The following changes since commit 661c2e1ab29cd9c4d268ae3f44712e8d421c0e56:
1
The following changes since commit c5ea91da443b458352c1b629b490ee6631775cb4:
2
2
3
scripts/checkpatch: Fix a typo (2025-03-04 09:30:26 +0800)
3
Merge tag 'pull-trivial-patches' of https://gitlab.com/mjt0k/qemu into staging (2023-09-08 10:06:25 -0400)
4
4
5
are available in the Git repository at:
5
are available in the Git repository at:
6
6
7
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20250305-1
7
https://github.com/alistair23/qemu.git tags/pull-riscv-to-apply-20230911
8
8
9
for you to fetch changes up to 4db19d5b21e058e6eb3474b6be470d1184afaa9e:
9
for you to fetch changes up to e7a03409f29e2da59297d55afbaec98c96e43e3a:
10
10
11
target/riscv/kvm: add missing KVM CSRs (2025-03-04 15:42:54 +1000)
11
target/riscv: don't read CSR in riscv_csrrw_do64 (2023-09-11 11:45:55 +1000)
12
12
13
----------------------------------------------------------------
13
----------------------------------------------------------------
14
Third RISC-V PR for 10.0
14
First RISC-V PR for 8.2
15
15
16
* CSR coverity fixes
16
* Remove 'host' CPU from TCG
17
* Fix unexpected behavior of vector reduction instructions when vl is 0
17
* riscv_htif Fixup printing on big endian hosts
18
* Fix incorrect vlen comparison in prop_vlen_set
18
* Add zmmul isa string
19
* Throw debug exception before page fault
19
* Add smepmp isa string
20
* Remove redundant "hart_idx" masking from APLIC
20
* Fix page_check_range use in fault-only-first
21
* Add support for Control Transfer Records Ext
21
* Use existing lookup tables for MixColumns
22
* Remove redundant struct members from the IOMMU
22
* Add RISC-V vector cryptographic instruction set support
23
* Remove duplicate definitions from the IOMMU
23
* Implement WARL behaviour for mcountinhibit/mcounteren
24
* Fix tick_offset migration for Goldfish RTC
24
* Add Zihintntl extension ISA string to DTS
25
* Add serial alias in virt machine DTB
25
* Fix zfa fleq.d and fltq.d
26
* Remove Bin Meng from RISC-V maintainers
26
* Fix upper/lower mtime write calculation
27
* Add support for Control Transfer Records Ext
27
* Make rtc variable names consistent
28
* Log guest errors when reserved bits are set in PTEs
28
* Use abi type for linux-user target_ucontext
29
* Add missing Sdtrig disas CSRs
29
* Add RISC-V KVM AIA Support
30
* Correct the hpmevent sscofpmf mask
30
* Fix riscv,pmu DT node path in the virt machine
31
* Mask upper sscofpmf bits during validation
31
* Update CSR bits name for svadu extension
32
* Remove warnings about Smdbltrp/Smrnmi being disabled
32
* Mark zicond non-experimental
33
* Respect mseccfg.RLB bit for TOR mode PMP entry
33
* Fix satp_mode_finalize() when satp_mode.supported = 0
34
* Update KVM support to Linux 6.14-rc3
34
* Fix non-KVM --enable-debug build
35
* IOMMU HPM support
35
* Add new extensions to hwprobe
36
* Support Sscofpmf/Svade/Svadu/Smnpm/Ssnpm extensions in KVM
36
* Use accelerated helper for AES64KS1I
37
* Add --ignore-family option to binfmt
37
* Allocate itrigger timers only once
38
* Refinement for AIA with KVM acceleration
38
* Respect mseccfg.RLB for pmpaddrX changes
39
* Reset time changes for KVM
39
* Align the AIA model to v1.0 ratified spec
40
* Don't read the CSR in riscv_csrrw_do64
40
41
41
----------------------------------------------------------------
42
----------------------------------------------------------------
42
Alistair Francis (1):
43
Akihiko Odaki (1):
43
MAINTAINERS: Remove Bin Meng from RISC-V maintainers
44
target/riscv: Allocate itrigger timers only once
44
45
45
Andrea Bolognani (3):
46
Ard Biesheuvel (2):
46
binfmt: Shuffle things around
47
target/riscv: Use existing lookup tables for MixColumns
47
binfmt: Normalize host CPU architecture
48
target/riscv: Use accelerated helper for AES64KS1I
48
binfmt: Add --ignore-family option
49
49
50
Atish Patra (2):
50
Conor Dooley (1):
51
target/riscv: Fix the hpmevent mask
51
hw/riscv: virt: Fix riscv,pmu DT node path
52
target/riscv: Mask out upper sscofpmf bits during validation
53
52
54
Clément Léger (1):
53
Daniel Henrique Barboza (6):
55
target/riscv: remove warnings about Smdbltrp/Smrnmi being disabled
54
target/riscv/cpu.c: do not run 'host' CPU with TCG
55
target/riscv/cpu.c: add zmmul isa string
56
target/riscv/cpu.c: add smepmp isa string
57
target/riscv: fix satp_mode_finalize() when satp_mode.supported = 0
58
hw/riscv/virt.c: fix non-KVM --enable-debug build
59
hw/intc/riscv_aplic.c fix non-KVM --enable-debug build
56
60
57
Daniel Henrique Barboza (22):
61
Dickon Hood (2):
58
target/riscv/csr.c: fix deadcode in rmw_xireg()
62
target/riscv: Refactor translation of vector-widening instruction
59
target/riscv/csr.c: fix 'ret' deadcode in rmw_xireg()
63
target/riscv: Add Zvbb ISA extension support
60
target/riscv/csr.c: fix deadcode in rmw_xiregi()
61
target/riscv/csr.c: fix deadcode in aia_smode32()
62
target/riscv/cpu_helper.c: fix bad_shift in riscv_cpu_interrupt()
63
target/riscv/debug.c: use wp size = 4 for 32-bit CPUs
64
target/riscv: throw debug exception before page fault
65
target/riscv: add ssu64xl
66
target/riscv: use RVB in RVA22U64
67
target/riscv: add profile u_parent and s_parent
68
target/riscv: change priv_ver check in validate_profile()
69
target/riscv: add RVA23U64 profile
70
target/riscv: add RVA23S64 profile
71
linux-headers: Update to Linux v6.14-rc3
72
target/riscv/cpu.c: create flag for ziccrse
73
target/riscv/kvm: add extensions after 6.14-rc3 update
74
hw/riscv/riscv-iommu.h: add missing headers
75
hw/riscv: add IOMMU HPM trace events
76
docs/specs/riscv-iommu.rst: add HPM support info
77
target/riscv/cpu: remove unneeded !kvm_enabled() check
78
target/riscv/kvm: add kvm_riscv_reset_regs_csr()
79
target/riscv/kvm: add missing KVM CSRs
80
64
81
Huang Borong (1):
65
Jason Chien (3):
82
hw/intc/riscv_aplic: Remove redundant "hart_idx" masking
66
target/riscv: Add Zihintntl extension ISA string to DTS
67
hw/intc: Fix upper/lower mtime write calculation
68
hw/intc: Make rtc variable names consistent
83
69
84
Jason Chien (2):
70
Kiran Ostrolenk (4):
85
hw/riscv/riscv-iommu: Remove redundant struct members
71
target/riscv: Refactor some of the generic vector functionality
86
hw/riscv/riscv-iommu-bits: Remove duplicate definitions
72
target/riscv: Refactor vector-vector translation macro
73
target/riscv: Refactor some of the generic vector functionality
74
target/riscv: Add Zvknh ISA extension support
87
75
88
Max Chou (2):
76
LIU Zhiwei (3):
89
target/riscv: rvv: Fix unexpected behavior of vector reduction instructions when vl is 0
77
target/riscv: Fix page_check_range use in fault-only-first
90
target/riscv: rvv: Fix incorrect vlen comparison in prop_vlen_set
78
target/riscv: Fix zfa fleq.d and fltq.d
79
linux-user/riscv: Use abi type for target_ucontext
91
80
92
Quan Zhou (1):
81
Lawrence Hunter (2):
93
target/riscv/kvm: Add some exts support
82
target/riscv: Add Zvbc ISA extension support
83
target/riscv: Add Zvksh ISA extension support
94
84
95
Rajnesh Kanwal (7):
85
Leon Schuermann (1):
96
target/riscv: Remove obsolete sfence.vm instruction
86
target/riscv/pmp.c: respect mseccfg.RLB for pmpaddrX changes
97
target/riscv: Add Control Transfer Records CSR definitions.
98
target/riscv: Add support for Control Transfer Records extension CSRs.
99
target/riscv: Add support to record CTR entries.
100
target/riscv: Add CTR sctrclr instruction.
101
target/riscv: machine: Add Control Transfer Record state description
102
target/riscv: Add support to access ctrsource, ctrtarget, ctrdata regs.
103
87
104
Rob Bradford (3):
88
Max Chou (3):
105
disas/riscv: Fix minor whitespace issues
89
crypto: Create sm4_subword
106
disas/riscv: Add missing Sdtrig CSRs
90
crypto: Add SM4 constant parameter CK
107
target/riscv: Respect mseccfg.RLB bit for TOR mode PMP entry
91
target/riscv: Add Zvksed ISA extension support
108
92
109
Rodrigo Dias Correa (1):
93
Nazar Kazakov (4):
110
goldfish_rtc: Fix tick_offset migration
94
target/riscv: Remove redundant "cpu_vl == 0" checks
95
target/riscv: Move vector translation checks
96
target/riscv: Add Zvkned ISA extension support
97
target/riscv: Add Zvkg ISA extension support
111
98
112
Tomasz Jeznach (8):
99
Nikita Shubin (1):
113
hw/riscv/riscv-iommu-bits.h: HPM bits
100
target/riscv: don't read CSR in riscv_csrrw_do64
114
hw/riscv/riscv-iommu: add riscv-iommu-hpm file
115
hw/riscv/riscv-iommu: add riscv_iommu_hpm_incr_ctr()
116
hw/riscv/riscv-iommu: instantiate hpm_timer
117
hw/riscv/riscv-iommu: add IOCOUNTINH mmio writes
118
hw/riscv/riscv-iommu: add IOHPMCYCLES mmio write
119
hw/riscv/riscv-iommu: add hpm events mmio write
120
hw/riscv/riscv-iommu.c: add RISCV_IOMMU_CAP_HPM cap
121
101
122
Vasilis Liaskovitis (1):
102
Rob Bradford (1):
123
hw/riscv/virt: Add serial alias in DTB
103
target/riscv: Implement WARL behaviour for mcountinhibit/mcounteren
124
104
125
Yong-Xuan Wang (3):
105
Robbin Ehn (1):
126
hw/intc/imsic: refine the IMSIC realize
106
linux-user/riscv: Add new extensions to hwprobe
127
hw/intc/aplic: refine the APLIC realize
128
hw/intc/aplic: refine kvm_msicfgaddr
129
107
130
julia (1):
108
Thomas Huth (2):
131
target/riscv: log guest errors when reserved bits are set in PTEs
109
hw/char/riscv_htif: Fix printing of console characters on big endian hosts
110
hw/char/riscv_htif: Fix the console syscall on big endian hosts
132
111
133
MAINTAINERS | 5 +-
112
Tommy Wu (1):
134
docs/specs/riscv-iommu.rst | 2 +
113
target/riscv: Align the AIA model to v1.0 ratified spec
135
hw/riscv/riscv-iommu-bits.h | 69 +++-
136
hw/riscv/riscv-iommu-hpm.h | 33 ++
137
hw/riscv/riscv-iommu.h | 32 +-
138
include/standard-headers/linux/ethtool.h | 4 +
139
include/standard-headers/linux/fuse.h | 76 +++-
140
include/standard-headers/linux/input-event-codes.h | 1 +
141
include/standard-headers/linux/pci_regs.h | 16 +-
142
include/standard-headers/linux/virtio_pci.h | 14 +
143
linux-headers/asm-arm64/kvm.h | 3 -
144
linux-headers/asm-loongarch/kvm_para.h | 1 +
145
linux-headers/asm-riscv/kvm.h | 7 +-
146
linux-headers/asm-x86/kvm.h | 1 +
147
linux-headers/linux/iommufd.h | 35 +-
148
linux-headers/linux/kvm.h | 8 +-
149
linux-headers/linux/stddef.h | 13 +-
150
linux-headers/linux/vduse.h | 2 +-
151
target/riscv/cpu-qom.h | 2 +
152
target/riscv/cpu.h | 16 +-
153
target/riscv/cpu_bits.h | 150 +++++++-
154
target/riscv/cpu_cfg.h | 5 +
155
target/riscv/helper.h | 2 +
156
target/riscv/insn32.decode | 2 +-
157
disas/riscv.c | 16 +-
158
hw/intc/riscv_aplic.c | 74 ++--
159
hw/intc/riscv_imsic.c | 47 +--
160
hw/riscv/riscv-iommu-hpm.c | 381 +++++++++++++++++++++
161
hw/riscv/riscv-iommu.c | 131 ++++++-
162
hw/riscv/virt.c | 3 +
163
hw/rtc/goldfish_rtc.c | 43 +--
164
target/riscv/cpu.c | 115 ++++++-
165
target/riscv/cpu_helper.c | 315 ++++++++++++++++-
166
target/riscv/csr.c | 318 +++++++++++++++--
167
target/riscv/debug.c | 6 +-
168
target/riscv/kvm/kvm-cpu.c | 40 ++-
169
target/riscv/machine.c | 25 ++
170
target/riscv/op_helper.c | 48 +++
171
target/riscv/pmp.c | 2 +-
172
target/riscv/pmu.c | 2 +-
173
target/riscv/tcg/tcg-cpu.c | 58 +++-
174
target/riscv/translate.c | 46 +++
175
target/riscv/vector_helper.c | 8 +-
176
target/riscv/insn_trans/trans_privileged.c.inc | 18 +-
177
target/riscv/insn_trans/trans_rvi.c.inc | 75 ++++
178
target/riscv/insn_trans/trans_rvzce.c.inc | 21 ++
179
hw/riscv/meson.build | 3 +-
180
hw/riscv/trace-events | 5 +
181
scripts/qemu-binfmt-conf.sh | 78 +++--
182
tests/data/acpi/riscv64/virt/RHCT | Bin 390 -> 400 bytes
183
50 files changed, 2106 insertions(+), 271 deletions(-)
184
create mode 100644 hw/riscv/riscv-iommu-hpm.h
185
create mode 100644 hw/riscv/riscv-iommu-hpm.c
186
114
115
Vineet Gupta (1):
116
riscv: zicond: make non-experimental
117
118
Weiwei Li (1):
119
target/riscv: Update CSR bits name for svadu extension
120
121
Yong-Xuan Wang (5):
122
target/riscv: support the AIA device emulation with KVM enabled
123
target/riscv: check the in-kernel irqchip support
124
target/riscv: Create an KVM AIA irqchip
125
target/riscv: update APLIC and IMSIC to support KVM AIA
126
target/riscv: select KVM AIA in riscv virt machine
127
128
include/crypto/aes.h | 7 +
129
include/crypto/sm4.h | 9 +
130
target/riscv/cpu_bits.h | 8 +-
131
target/riscv/cpu_cfg.h | 9 +
132
target/riscv/debug.h | 3 +-
133
target/riscv/helper.h | 98 +++
134
target/riscv/kvm_riscv.h | 5 +
135
target/riscv/vector_internals.h | 228 +++++++
136
target/riscv/insn32.decode | 58 ++
137
crypto/aes.c | 4 +-
138
crypto/sm4.c | 10 +
139
hw/char/riscv_htif.c | 12 +-
140
hw/intc/riscv_aclint.c | 11 +-
141
hw/intc/riscv_aplic.c | 52 +-
142
hw/intc/riscv_imsic.c | 25 +-
143
hw/riscv/virt.c | 374 ++++++------
144
linux-user/riscv/signal.c | 4 +-
145
linux-user/syscall.c | 14 +-
146
target/arm/tcg/crypto_helper.c | 10 +-
147
target/riscv/cpu.c | 83 ++-
148
target/riscv/cpu_helper.c | 6 +-
149
target/riscv/crypto_helper.c | 51 +-
150
target/riscv/csr.c | 54 +-
151
target/riscv/debug.c | 15 +-
152
target/riscv/kvm.c | 201 ++++++-
153
target/riscv/pmp.c | 4 +
154
target/riscv/translate.c | 1 +
155
target/riscv/vcrypto_helper.c | 970 ++++++++++++++++++++++++++++++
156
target/riscv/vector_helper.c | 245 +-------
157
target/riscv/vector_internals.c | 81 +++
158
target/riscv/insn_trans/trans_rvv.c.inc | 171 +++---
159
target/riscv/insn_trans/trans_rvvk.c.inc | 606 +++++++++++++++++++
160
target/riscv/insn_trans/trans_rvzfa.c.inc | 4 +-
161
target/riscv/meson.build | 4 +-
162
34 files changed, 2785 insertions(+), 652 deletions(-)
163
create mode 100644 target/riscv/vector_internals.h
164
create mode 100644 target/riscv/vcrypto_helper.c
165
create mode 100644 target/riscv/vector_internals.c
166
create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
This header is incomplete, i.e. it is using definitions that are being
3
The 'host' CPU is available in a CONFIG_KVM build and it's currently
4
supplied by the .c files that are including it.
4
available for all accels, but is a KVM only CPU. This means that in a
5
RISC-V KVM capable host we can do things like this:
5
6
6
Adding this header into a fresh .c file will result in errors:
7
$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
8
qemu-system-riscv64: H extension requires priv spec 1.12.0
7
9
8
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:30:17: error: field ‘parent_obj’ has incomplete type
10
This CPU does not have a priv spec because we don't filter its extensions
9
30 | DeviceState parent_obj;
11
via priv spec. We shouldn't be reaching riscv_cpu_realize_tcg() at all
10
| ^~~~~~~~~~
12
with the 'host' CPU.
11
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:50:5: error: unknown type name ‘dma_addr_t’; did you mean ‘in_addr_t’?
12
50 | dma_addr_t cq_addr; /* Command queue base physical address */
13
| ^~~~~~~~~~
14
| in_addr_t
15
(...)
16
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:62:5: error: unknown type name ‘QemuThread’; did you mean ‘GThread’?
17
62 | QemuThread core_proc; /* Background processing thread */
18
| ^~~~~~~~~~
19
| GThread
20
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:63:5: error: unknown type name ‘QemuCond’
21
63 | QemuCond core_cond; /* Background processing wake up signal */
22
| ^~~~~~~~
23
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:71:18: error: field ‘trap_as’ has incomplete type
24
71 | AddressSpace trap_as;
25
| ^~~~~~~
26
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:72:18: error: field ‘trap_mr’ has incomplete type
27
72 | MemoryRegion trap_mr;
28
| ^~~~~~~
29
/home/danielhb/work/qemu/hw/riscv/riscv-iommu.h:80:18: error: field ‘regs_mr’ has incomplete type
30
80 | MemoryRegion regs_mr;
31
| ^~~~~~~
32
13
33
Fix it by adding the missing headers for these definitions.
14
We don't have a way to filter the 'host' CPU out of the available CPU
15
options (-cpu help) if the build includes both KVM and TCG. What we can
16
do is to error out during riscv_cpu_realize_tcg() if the user chooses
17
the 'host' CPU with accel=tcg:
18
19
$ ./build/qemu-system-riscv64 -M virt,accel=tcg -cpu host --nographic
20
qemu-system-riscv64: 'host' CPU is not compatible with TCG acceleration
34
21
35
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
22
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
36
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
23
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
37
Message-ID: <20250224190826.1858473-2-dbarboza@ventanamicro.com>
24
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
25
Message-Id: <20230721133411.474105-1-dbarboza@ventanamicro.com>
38
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
26
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
39
---
27
---
40
hw/riscv/riscv-iommu.h | 2 ++
28
target/riscv/cpu.c | 5 +++++
41
1 file changed, 2 insertions(+)
29
1 file changed, 5 insertions(+)
42
30
43
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
31
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
44
index XXXXXXX..XXXXXXX 100644
32
index XXXXXXX..XXXXXXX 100644
45
--- a/hw/riscv/riscv-iommu.h
33
--- a/target/riscv/cpu.c
46
+++ b/hw/riscv/riscv-iommu.h
34
+++ b/target/riscv/cpu.c
47
@@ -XXX,XX +XXX,XX @@
35
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
48
#define HW_RISCV_IOMMU_STATE_H
36
CPURISCVState *env = &cpu->env;
49
37
Error *local_err = NULL;
50
#include "qom/object.h"
38
51
+#include "hw/qdev-properties.h"
39
+ if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
52
+#include "system/dma.h"
40
+ error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
53
#include "hw/riscv/iommu.h"
41
+ return;
54
#include "hw/riscv/riscv-iommu-bits.h"
42
+ }
55
43
+
44
riscv_cpu_validate_misa_mxl(cpu, &local_err);
45
if (local_err != NULL) {
46
error_propagate(errp, local_err);
56
--
47
--
57
2.48.1
48
2.41.0
58
49
59
50
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Thomas Huth <thuth@redhat.com>
2
2
3
Remove the !kvm_enabled() check in kvm_riscv_reset_vcpu() since the
3
The character that should be printed is stored in the 64 bit "payload"
4
function is already being gated by kvm_enabled() in
4
variable. The code currently tries to print it by taking the address
5
riscv_cpu_reset_hold().
5
of the variable and passing this pointer to qemu_chr_fe_write(). However,
6
this only works on little endian hosts where the least significant bits
7
are stored on the lowest address. To do this in a portable way, we have
8
to store the value in an uint8_t variable instead.
6
9
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Fixes: 5033606780 ("RISC-V HTIF Console")
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Signed-off-by: Thomas Huth <thuth@redhat.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250224123120.1644186-2-dbarboza@ventanamicro.com>
13
Reviewed-by: Bin Meng <bmeng@tinylab.org>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Message-Id: <20230721094720.902454-2-thuth@redhat.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
18
---
13
target/riscv/kvm/kvm-cpu.c | 3 ---
19
hw/char/riscv_htif.c | 3 ++-
14
1 file changed, 3 deletions(-)
20
1 file changed, 2 insertions(+), 1 deletion(-)
15
21
16
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
22
diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
17
index XXXXXXX..XXXXXXX 100644
23
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/kvm/kvm-cpu.c
24
--- a/hw/char/riscv_htif.c
19
+++ b/target/riscv/kvm/kvm-cpu.c
25
+++ b/hw/char/riscv_htif.c
20
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
26
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
21
CPURISCVState *env = &cpu->env;
27
s->tohost = 0; /* clear to indicate we read */
22
int i;
28
return;
23
29
} else if (cmd == HTIF_CONSOLE_CMD_PUTC) {
24
- if (!kvm_enabled()) {
30
- qemu_chr_fe_write(&s->chr, (uint8_t *)&payload, 1);
25
- return;
31
+ uint8_t ch = (uint8_t)payload;
26
- }
32
+ qemu_chr_fe_write(&s->chr, &ch, 1);
27
for (i = 0; i < 32; i++) {
33
resp = 0x100 | (uint8_t)payload;
28
env->gpr[i] = 0;
34
} else {
29
}
35
qemu_log("HTIF device %d: unknown command\n", device);
30
--
36
--
31
2.48.1
37
2.41.0
38
39
diff view generated by jsdifflib
1
From: Andrea Bolognani <abologna@redhat.com>
1
From: Thomas Huth <thuth@redhat.com>
2
2
3
Right now information regarding the family each CPU type belongs
3
Values that have been read via cpu_physical_memory_read() from the
4
to is recorded in two places: the large data table at the top of
4
guest's memory have to be swapped in case the host endianess differs
5
the script, and the qemu_host_family() function.
5
from the guest.
6
6
7
We can make things better by mapping host CPU architecture to
7
Fixes: a6e13e31d5 ("riscv_htif: Support console output via proxy syscall")
8
QEMU target in the few cases where the two don't already match
8
Signed-off-by: Thomas Huth <thuth@redhat.com>
9
and then using the data table to look up the family, same as
10
we're already doing for the guest CPU architecture.
11
12
Being able to reason in terms of QEMU target regardless of
13
whether we're looking at the host or guest CPU architecture will
14
come in handy to implement upcoming changes.
15
16
A couple of entries are dropped in the process: BePC and Power
17
Macintosh. I'm quite certain neither of those have ever been
18
reported as CPU architectures by Linux. I believe many more of
19
the entries that are carried forward could be dropped as well,
20
but I don't have the same level of confidence there so I
21
decided to play it safe just in case.
22
23
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
10
Reviewed-by: Bin Meng <bmeng@tinylab.org>
26
Message-ID: <20250127182924.103510-3-abologna@redhat.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-Id: <20230721094720.902454-3-thuth@redhat.com>
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
28
---
14
---
29
scripts/qemu-binfmt-conf.sh | 44 +++++++++++++++++++++----------------
15
hw/char/riscv_htif.c | 9 +++++----
30
1 file changed, 25 insertions(+), 19 deletions(-)
16
1 file changed, 5 insertions(+), 4 deletions(-)
31
17
32
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
18
diff --git a/hw/char/riscv_htif.c b/hw/char/riscv_htif.c
33
index XXXXXXX..XXXXXXX 100755
19
index XXXXXXX..XXXXXXX 100644
34
--- a/scripts/qemu-binfmt-conf.sh
20
--- a/hw/char/riscv_htif.c
35
+++ b/scripts/qemu-binfmt-conf.sh
21
+++ b/hw/char/riscv_htif.c
36
@@ -XXX,XX +XXX,XX @@ loongarch64_magic='\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x
22
@@ -XXX,XX +XXX,XX @@
37
loongarch64_mask='\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'
23
#include "qemu/timer.h"
38
loongarch64_family=loongarch
24
#include "qemu/error-report.h"
39
25
#include "exec/address-spaces.h"
40
-qemu_get_family() {
26
+#include "exec/tswap.h"
41
- cpu=${HOST_ARCH:-$(uname -m)}
27
#include "sysemu/dma.h"
42
+# Converts the name of a host CPU architecture to the corresponding QEMU
28
43
+# target.
29
#define RISCV_DEBUG_HTIF 0
44
+#
30
@@ -XXX,XX +XXX,XX @@ static void htif_handle_tohost_write(HTIFState *s, uint64_t val_written)
45
+# FIXME: This can probably be simplified a lot by dropping most entries.
31
} else {
46
+# Remember that the script is only used on Linux, so we only need to
32
uint64_t syscall[8];
47
+# handle the strings Linux uses to report the host CPU architecture.
33
cpu_physical_memory_read(payload, syscall, sizeof(syscall));
48
+qemu_normalize() {
34
- if (syscall[0] == PK_SYS_WRITE &&
49
+ cpu="$1"
35
- syscall[1] == HTIF_DEV_CONSOLE &&
50
case "$cpu" in
36
- syscall[3] == HTIF_CONSOLE_CMD_PUTC) {
51
- amd64|i386|i486|i586|i686|i86pc|BePC|x86_64)
37
+ if (tswap64(syscall[0]) == PK_SYS_WRITE &&
52
+ i[3-6]86)
38
+ tswap64(syscall[1]) == HTIF_DEV_CONSOLE &&
53
echo "i386"
39
+ tswap64(syscall[3]) == HTIF_CONSOLE_CMD_PUTC) {
54
;;
40
uint8_t ch;
55
- mips*)
41
- cpu_physical_memory_read(syscall[2], &ch, 1);
56
- echo "mips"
42
+ cpu_physical_memory_read(tswap64(syscall[2]), &ch, 1);
57
+ amd64)
43
qemu_chr_fe_write(&s->chr, &ch, 1);
58
+ echo "x86_64"
44
resp = 0x100 | (uint8_t)payload;
59
;;
45
} else {
60
- "Power Macintosh"|ppc64|powerpc|ppc)
61
+ powerpc)
62
echo "ppc"
63
;;
64
- ppc64el|ppc64le)
65
- echo "ppcle"
66
+ ppc64el)
67
+ echo "ppc64le"
68
;;
69
- arm|armel|armhf|arm64|armv[4-9]*l|aarch64)
70
+ armel|armhf|armv[4-9]*l)
71
echo "arm"
72
;;
73
- armeb|armv[4-9]*b|aarch64_be)
74
+ armv[4-9]*b)
75
echo "armeb"
76
;;
77
- sparc*)
78
- echo "sparc"
79
- ;;
80
- riscv*)
81
- echo "riscv"
82
- ;;
83
- loongarch*)
84
- echo "loongarch"
85
+ arm64)
86
+ echo "aarch64"
87
;;
88
*)
89
echo "$cpu"
90
@@ -XXX,XX +XXX,XX @@ EOF
91
92
qemu_set_binfmts() {
93
# probe cpu type
94
- host_family=$(qemu_get_family)
95
+ host_cpu=$(qemu_normalize ${HOST_ARCH:-$(uname -m)})
96
+ host_family=$(eval echo \$${host_cpu}_family)
97
+
98
+ if [ "$host_family" = "" ] ; then
99
+ echo "INTERNAL ERROR: unknown host cpu $host_cpu" 1>&2
100
+ exit 1
101
+ fi
102
103
# register the interpreter for each cpu except for the native one
104
105
--
46
--
106
2.48.1
47
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
Add RVA23S64 as described in [1]. This profile inherits all mandatory
3
zmmul was promoted from experimental to ratified in commit 6d00ffad4e95.
4
extensions of RVA23U64 and RVA22S64, making it a child of both profiles.
4
Add a riscv,isa string for it.
5
5
6
A new "rva23s64" profile CPU is also added. This is the generated
6
Fixes: 6d00ffad4e95 ("target/riscv: move zmmul out of the experimental properties")
7
riscv,isa for it (taken via -M dumpdtb):
8
9
rv64imafdcbvh_zic64b_zicbom_zicbop_zicboz_ziccamoa_ziccif_zicclsm_
10
ziccrse_zicond_zicntr_zicsr_zifencei_zihintntl_zihintpause_zihpm_zimop_
11
zmmul_za64rs_zaamo_zalrsc_zawrs_zfa_zfhmin_zca_zcb_zcd_zcmop_zba_zbb_zbs_
12
zkt_zvbb_zve32f_zve32x_zve64f_zve64d_zve64x_zvfhmin_zvkb_zvkt_shcounterenw_
13
sha_shgatpa_shtvala_shvsatpa_shvstvala_shvstvecd_smnpm_smstateen_ssccptr_
14
sscofpmf_sscounterenw_ssnpm_ssstateen_sstc_sstvala_sstvecd_ssu64xl_
15
supm_svade_svinval_svnapot_svpbmt
16
17
[1] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc
18
19
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
20
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
8
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
21
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
22
Message-ID: <20250115184316.2344583-7-dbarboza@ventanamicro.com>
10
Message-Id: <20230720132424.371132-2-dbarboza@ventanamicro.com>
23
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
24
---
12
---
25
target/riscv/cpu-qom.h | 1 +
13
target/riscv/cpu.c | 1 +
26
target/riscv/cpu.c | 39 +++++++++++++++++++++++++++++++++++++++
14
1 file changed, 1 insertion(+)
27
2 files changed, 40 insertions(+)
28
15
29
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu-qom.h
32
+++ b/target/riscv/cpu-qom.h
33
@@ -XXX,XX +XXX,XX @@
34
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
35
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
36
#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
37
+#define TYPE_RISCV_CPU_RVA23S64 RISCV_CPU_TYPE_NAME("rva23s64")
38
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
39
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
40
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
41
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
16
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
42
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
43
--- a/target/riscv/cpu.c
18
--- a/target/riscv/cpu.c
44
+++ b/target/riscv/cpu.c
19
+++ b/target/riscv/cpu.c
45
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA23U64 = {
20
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
46
}
21
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
47
};
22
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
48
23
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
49
+/*
24
+ ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
50
+ * As with RVA23U64, RVA23S64 also defines 'named features'.
25
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
51
+ *
26
ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
52
+ * Cache related features that we consider enabled since we don't
27
ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
53
+ * implement cache: Ssccptr
54
+ *
55
+ * Other named features that we already implement: Sstvecd, Sstvala,
56
+ * Sscounterenw, Ssu64xl
57
+ *
58
+ * The remaining features/extensions comes from RVA23S64.
59
+ */
60
+static RISCVCPUProfile RVA23S64 = {
61
+ .u_parent = &RVA23U64,
62
+ .s_parent = &RVA22S64,
63
+ .name = "rva23s64",
64
+ .misa_ext = RVS,
65
+ .priv_spec = PRIV_VERSION_1_13_0,
66
+ .satp_mode = VM_1_10_SV39,
67
+ .ext_offsets = {
68
+ /* New in RVA23S64 */
69
+ CPU_CFG_OFFSET(ext_svnapot), CPU_CFG_OFFSET(ext_sstc),
70
+ CPU_CFG_OFFSET(ext_sscofpmf), CPU_CFG_OFFSET(ext_ssnpm),
71
+
72
+ /* Named features: Sha */
73
+ CPU_CFG_OFFSET(ext_sha),
74
+
75
+ RISCV_PROFILE_EXT_LIST_END
76
+ }
77
+};
78
+
79
RISCVCPUProfile *riscv_profiles[] = {
80
&RVA22U64,
81
&RVA22S64,
82
&RVA23U64,
83
+ &RVA23S64,
84
NULL,
85
};
86
87
@@ -XXX,XX +XXX,XX @@ static void rva23u64_profile_cpu_init(Object *obj)
88
89
RVA23U64.enabled = true;
90
}
91
+
92
+static void rva23s64_profile_cpu_init(Object *obj)
93
+{
94
+ rv64i_bare_cpu_init(obj);
95
+
96
+ RVA23S64.enabled = true;
97
+}
98
#endif
99
100
static const gchar *riscv_gdb_arch_name(CPUState *cs)
101
@@ -XXX,XX +XXX,XX @@ static const TypeInfo riscv_cpu_type_infos[] = {
102
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
103
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
104
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
105
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23S64, MXL_RV64, rva23s64_profile_cpu_init),
106
#endif /* TARGET_RISCV64 */
107
};
108
109
--
28
--
110
2.48.1
29
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
From the time we added RVA22U64 until now the spec didn't declare 'RVB'
3
The cpu->cfg.epmp extension is still experimental, but it already has a
4
as a dependency, using zba/zbb/zbs instead. Since then the RVA22 spec
4
'smepmp' riscv,isa string. Add it.
5
[1] added the following in the 'RVA22U64 Mandatory Extensions' section:
6
7
"B Bit-manipulation instructions
8
9
Note: The B extension comprises the Zba, Zbb, and Zbs extensions. At the
10
time of RVA22U64's ratification, the B extension had not yet been
11
defined, and so RVA22U64 explicitly mandated Zba, Zbb, and Zbs instead.
12
Mandating B is equivalent."
13
14
It is also equivalent to QEMU (see riscv_cpu_validate_b() in
15
target/riscv/tcg/tcg-cpu.c).
16
17
Finally, RVA23U64 [2] directly mentions RVB as a mandatory extension,
18
not citing zba/zbb/zbs.
19
20
To make it clear that RVA23U64 will extend RVA22U64 (i.e. RVA22 is a
21
parent of RVA23), use RVB in RVA22U64 as well.
22
23
(bios-tables-test change: RVB added to riscv,isa)
24
25
[1] https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#61-rva22u64-profile
26
[2] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc#rva23u64-profile
27
5
28
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
6
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
29
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
7
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
30
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
31
Message-ID: <20250115184316.2344583-3-dbarboza@ventanamicro.com>
9
Message-Id: <20230720132424.371132-3-dbarboza@ventanamicro.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
---
11
---
34
target/riscv/cpu.c | 2 +-
12
target/riscv/cpu.c | 1 +
35
tests/data/acpi/riscv64/virt/RHCT | Bin 398 -> 400 bytes
13
1 file changed, 1 insertion(+)
36
2 files changed, 1 insertion(+), 1 deletion(-)
37
14
38
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
15
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
39
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
40
--- a/target/riscv/cpu.c
17
--- a/target/riscv/cpu.c
41
+++ b/target/riscv/cpu.c
18
+++ b/target/riscv/cpu.c
42
@@ -XXX,XX +XXX,XX @@ static const PropertyInfo prop_marchid = {
19
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
43
static RISCVCPUProfile RVA22U64 = {
20
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
44
.parent = NULL,
21
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
45
.name = "rva22u64",
22
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
46
- .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU,
23
+ ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
47
+ .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
24
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
48
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
25
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
49
.satp_mode = RISCV_PROFILE_ATTR_UNUSED,
26
ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
50
.ext_offsets = {
51
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
52
index XXXXXXX..XXXXXXX 100644
53
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
54
--
27
--
55
2.48.1
28
2.41.0
diff view generated by jsdifflib
1
From: Max Chou <max.chou@sifive.com>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
According to the Vector Reduction Operations section in the RISC-V "V"
3
Commit bef6f008b98(accel/tcg: Return bool from page_check_range) converts
4
Vector Extension spec,
4
integer return value to bool type. However, it wrongly converted the use
5
"If vl=0, no operation is performed and the destination register is not
5
of the API in riscv fault-only-first, where page_check_range < = 0, should
6
updated."
6
be converted to !page_check_range.
7
7
8
The vd should be updated when vl is larger than 0.
8
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
9
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Fixes: fe5c9ab1fc ("target/riscv: vector single-width integer reduction instructions")
10
Message-ID: <20230729031618.821-1-zhiwei_liu@linux.alibaba.com>
11
Fixes: f714361ed7 ("target/riscv: rvv-1.0: implement vstart CSR")
12
Signed-off-by: Max Chou <max.chou@sifive.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250124101452.2519171-1-max.chou@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
12
---
17
target/riscv/vector_helper.c | 8 ++++++--
13
target/riscv/vector_helper.c | 2 +-
18
1 file changed, 6 insertions(+), 2 deletions(-)
14
1 file changed, 1 insertion(+), 1 deletion(-)
19
15
20
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
16
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
21
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/vector_helper.c
18
--- a/target/riscv/vector_helper.c
23
+++ b/target/riscv/vector_helper.c
19
+++ b/target/riscv/vector_helper.c
24
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
20
@@ -XXX,XX +XXX,XX @@ vext_ldff(void *vd, void *v0, target_ulong base,
25
} \
21
cpu_mmu_index(env, false));
26
s1 = OP(s1, (TD)s2); \
22
if (host) {
27
} \
23
#ifdef CONFIG_USER_ONLY
28
- *((TD *)vd + HD(0)) = s1; \
24
- if (page_check_range(addr, offset, PAGE_READ)) {
29
+ if (vl > 0) { \
25
+ if (!page_check_range(addr, offset, PAGE_READ)) {
30
+ *((TD *)vd + HD(0)) = s1; \
26
vl = i;
31
+ } \
27
goto ProbeSuccess;
32
env->vstart = 0; \
28
}
33
/* set tail elements to 1s */ \
34
vext_set_elems_1s(vd, vta, esz, vlenb); \
35
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
36
} \
37
s1 = OP(s1, (TD)s2, &env->fp_status); \
38
} \
39
- *((TD *)vd + HD(0)) = s1; \
40
+ if (vl > 0) { \
41
+ *((TD *)vd + HD(0)) = s1; \
42
+ } \
43
env->vstart = 0; \
44
/* set tail elements to 1s */ \
45
vext_set_elems_1s(vd, vta, esz, vlenb); \
46
--
29
--
47
2.48.1
30
2.41.0
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Ard Biesheuvel <ardb@kernel.org>
2
2
3
Add the relevant HPM (High Performance Monitor) bits that we'll be using
3
The AES MixColumns and InvMixColumns operations are relatively
4
in the next patches.
4
expensive 4x4 matrix multiplications in GF(2^8), which is why C
5
implementations usually rely on precomputed lookup tables rather than
6
performing the calculations on demand.
5
7
6
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
8
Given that we already carry those tables in QEMU, we can just grab the
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
right value in the implementation of the RISC-V AES32 instructions. Note
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
10
that the tables in question are permuted according to the respective
9
Message-ID: <20250224190826.1858473-3-dbarboza@ventanamicro.com>
11
Sbox, so we can omit the Sbox lookup as well in this case.
12
13
Cc: Richard Henderson <richard.henderson@linaro.org>
14
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
15
Cc: Zewen Ye <lustrew@foxmail.com>
16
Cc: Weiwei Li <liweiwei@iscas.ac.cn>
17
Cc: Junqiang Wang <wangjunqiang@iscas.ac.cn>
18
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
19
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
20
Message-ID: <20230731084043.1791984-1-ardb@kernel.org>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
22
---
12
hw/riscv/riscv-iommu-bits.h | 47 +++++++++++++++++++++++++++++++++++++
23
include/crypto/aes.h | 7 +++++++
13
1 file changed, 47 insertions(+)
24
crypto/aes.c | 4 ++--
25
target/riscv/crypto_helper.c | 34 ++++------------------------------
26
3 files changed, 13 insertions(+), 32 deletions(-)
14
27
15
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
28
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
16
index XXXXXXX..XXXXXXX 100644
29
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/riscv/riscv-iommu-bits.h
30
--- a/include/crypto/aes.h
18
+++ b/hw/riscv/riscv-iommu-bits.h
31
+++ b/include/crypto/aes.h
19
@@ -XXX,XX +XXX,XX @@ struct riscv_iommu_pq_record {
32
@@ -XXX,XX +XXX,XX @@ void AES_decrypt(const unsigned char *in, unsigned char *out,
20
#define RISCV_IOMMU_CAP_ATS BIT_ULL(25)
33
extern const uint8_t AES_sbox[256];
21
#define RISCV_IOMMU_CAP_T2GPA BIT_ULL(26)
34
extern const uint8_t AES_isbox[256];
22
#define RISCV_IOMMU_CAP_IGS GENMASK_ULL(29, 28)
35
23
+#define RISCV_IOMMU_CAP_HPM BIT_ULL(30)
36
+/*
24
#define RISCV_IOMMU_CAP_DBG BIT_ULL(31)
37
+AES_Te0[x] = S [x].[02, 01, 01, 03];
25
#define RISCV_IOMMU_CAP_PAS GENMASK_ULL(37, 32)
38
+AES_Td0[x] = Si[x].[0e, 09, 0d, 0b];
26
#define RISCV_IOMMU_CAP_PD8 BIT_ULL(38)
39
+*/
27
@@ -XXX,XX +XXX,XX @@ enum {
40
+
28
RISCV_IOMMU_INTR_COUNT
41
+extern const uint32_t AES_Te0[256], AES_Td0[256];
42
+
43
#endif
44
diff --git a/crypto/aes.c b/crypto/aes.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/crypto/aes.c
47
+++ b/crypto/aes.c
48
@@ -XXX,XX +XXX,XX @@ AES_Td3[x] = Si[x].[09, 0d, 0b, 0e];
49
AES_Td4[x] = Si[x].[01, 01, 01, 01];
50
*/
51
52
-static const uint32_t AES_Te0[256] = {
53
+const uint32_t AES_Te0[256] = {
54
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
55
0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
56
0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
57
@@ -XXX,XX +XXX,XX @@ static const uint32_t AES_Te4[256] = {
58
0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
29
};
59
};
30
60
31
+#define RISCV_IOMMU_IOCOUNT_NUM 31
61
-static const uint32_t AES_Td0[256] = {
32
+
62
+const uint32_t AES_Td0[256] = {
33
+/* 5.19 Performance monitoring counter overflow status (32bits) */
63
0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
34
+#define RISCV_IOMMU_REG_IOCOUNTOVF 0x0058
64
0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
35
+#define RISCV_IOMMU_IOCOUNTOVF_CY BIT(0)
65
0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
36
+
66
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
37
+/* 5.20 Performance monitoring counter inhibits (32bits) */
67
index XXXXXXX..XXXXXXX 100644
38
+#define RISCV_IOMMU_REG_IOCOUNTINH 0x005C
68
--- a/target/riscv/crypto_helper.c
39
+#define RISCV_IOMMU_IOCOUNTINH_CY BIT(0)
69
+++ b/target/riscv/crypto_helper.c
40
+
70
@@ -XXX,XX +XXX,XX @@
41
+/* 5.21 Performance monitoring cycles counter (64bits) */
71
#include "crypto/aes-round.h"
42
+#define RISCV_IOMMU_REG_IOHPMCYCLES 0x0060
72
#include "crypto/sm4.h"
43
+#define RISCV_IOMMU_IOHPMCYCLES_COUNTER GENMASK_ULL(62, 0)
73
44
+#define RISCV_IOMMU_IOHPMCYCLES_OVF BIT_ULL(63)
74
-#define AES_XTIME(a) \
45
+
75
- ((a << 1) ^ ((a & 0x80) ? 0x1b : 0))
46
+/* 5.22 Performance monitoring event counters (31 * 64bits) */
76
-
47
+#define RISCV_IOMMU_REG_IOHPMCTR_BASE 0x0068
77
-#define AES_GFMUL(a, b) (( \
48
+#define RISCV_IOMMU_REG_IOHPMCTR(_n) \
78
- (((b) & 0x1) ? (a) : 0) ^ \
49
+ (RISCV_IOMMU_REG_IOHPMCTR_BASE + (_n * 0x8))
79
- (((b) & 0x2) ? AES_XTIME(a) : 0) ^ \
50
+
80
- (((b) & 0x4) ? AES_XTIME(AES_XTIME(a)) : 0) ^ \
51
+/* 5.23 Performance monitoring event selectors (31 * 64bits) */
81
- (((b) & 0x8) ? AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0)) & 0xFF)
52
+#define RISCV_IOMMU_REG_IOHPMEVT_BASE 0x0160
82
-
53
+#define RISCV_IOMMU_REG_IOHPMEVT(_n) \
83
-static inline uint32_t aes_mixcolumn_byte(uint8_t x, bool fwd)
54
+ (RISCV_IOMMU_REG_IOHPMEVT_BASE + (_n * 0x8))
84
-{
55
+#define RISCV_IOMMU_IOHPMEVT_EVENT_ID GENMASK_ULL(14, 0)
85
- uint32_t u;
56
+#define RISCV_IOMMU_IOHPMEVT_DMASK BIT_ULL(15)
86
-
57
+#define RISCV_IOMMU_IOHPMEVT_PID_PSCID GENMASK_ULL(35, 16)
87
- if (fwd) {
58
+#define RISCV_IOMMU_IOHPMEVT_DID_GSCID GENMASK_ULL(59, 36)
88
- u = (AES_GFMUL(x, 3) << 24) | (x << 16) | (x << 8) |
59
+#define RISCV_IOMMU_IOHPMEVT_PV_PSCV BIT_ULL(60)
89
- (AES_GFMUL(x, 2) << 0);
60
+#define RISCV_IOMMU_IOHPMEVT_DV_GSCV BIT_ULL(61)
90
- } else {
61
+#define RISCV_IOMMU_IOHPMEVT_IDT BIT_ULL(62)
91
- u = (AES_GFMUL(x, 0xb) << 24) | (AES_GFMUL(x, 0xd) << 16) |
62
+#define RISCV_IOMMU_IOHPMEVT_OF BIT_ULL(63)
92
- (AES_GFMUL(x, 0x9) << 8) | (AES_GFMUL(x, 0xe) << 0);
63
+
93
- }
64
+enum RISCV_IOMMU_HPMEVENT_id {
94
- return u;
65
+ RISCV_IOMMU_HPMEVENT_INVALID = 0,
95
-}
66
+ RISCV_IOMMU_HPMEVENT_URQ = 1,
96
-
67
+ RISCV_IOMMU_HPMEVENT_TRQ = 2,
97
#define sext32_xlen(x) (target_ulong)(int32_t)(x)
68
+ RISCV_IOMMU_HPMEVENT_ATS_RQ = 3,
98
69
+ RISCV_IOMMU_HPMEVENT_TLB_MISS = 4,
99
static inline target_ulong aes32_operation(target_ulong shamt,
70
+ RISCV_IOMMU_HPMEVENT_DD_WALK = 5,
100
@@ -XXX,XX +XXX,XX @@ static inline target_ulong aes32_operation(target_ulong shamt,
71
+ RISCV_IOMMU_HPMEVENT_PD_WALK = 6,
101
bool enc, bool mix)
72
+ RISCV_IOMMU_HPMEVENT_S_VS_WALKS = 7,
102
{
73
+ RISCV_IOMMU_HPMEVENT_G_WALKS = 8,
103
uint8_t si = rs2 >> shamt;
74
+ RISCV_IOMMU_HPMEVENT_MAX = 9
104
- uint8_t so;
75
+};
105
uint32_t mixed;
76
+
106
target_ulong res;
77
/* 5.24 Translation request IOVA (64bits) */
107
78
#define RISCV_IOMMU_REG_TR_REQ_IOVA 0x0258
108
if (enc) {
79
109
- so = AES_sbox[si];
110
if (mix) {
111
- mixed = aes_mixcolumn_byte(so, true);
112
+ mixed = be32_to_cpu(AES_Te0[si]);
113
} else {
114
- mixed = so;
115
+ mixed = AES_sbox[si];
116
}
117
} else {
118
- so = AES_isbox[si];
119
if (mix) {
120
- mixed = aes_mixcolumn_byte(so, false);
121
+ mixed = be32_to_cpu(AES_Td0[si]);
122
} else {
123
- mixed = so;
124
+ mixed = AES_isbox[si];
125
}
126
}
127
mixed = rol32(mixed, shamt);
80
--
128
--
81
2.48.1
129
2.41.0
130
131
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
The HPM (Hardware Performance Monitor) support consists of almost 7
3
Take some functions/macros out of `vector_helper` and put them in a new
4
hundred lines that would be put on top of the base riscv-iommu
4
module called `vector_internals`. This ensures they can be used by both
5
emulation.
5
vector and vector-crypto helpers (latter implemented in proceeding
6
commits).
6
7
7
To avoid clogging riscv-iommu.c, add a separated riscv-iommu-hpm file
8
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
8
that will contain HPM specific code.
9
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
9
10
Signed-off-by: Max Chou <max.chou@sifive.com>
10
We'll start by adding riscv_iommu_hpmcycle_read(), a helper that will be
11
called during the riscv_iommu_mmio_read() callback.
12
13
This change will have no effect on the existing emulation since we're
14
not declaring HPM feature support.
15
16
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
17
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
18
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20250224190826.1858473-4-dbarboza@ventanamicro.com>
12
Message-ID: <20230711165917.2629866-2-max.chou@sifive.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
14
---
22
hw/riscv/riscv-iommu-hpm.h | 27 +++++++++++++++++++
15
target/riscv/vector_internals.h | 182 +++++++++++++++++++++++++++++
23
hw/riscv/riscv-iommu.h | 4 +++
16
target/riscv/vector_helper.c | 201 +-------------------------------
24
hw/riscv/riscv-iommu-hpm.c | 54 ++++++++++++++++++++++++++++++++++++++
17
target/riscv/vector_internals.c | 81 +++++++++++++
25
hw/riscv/riscv-iommu.c | 24 ++++++++++++++++-
18
target/riscv/meson.build | 1 +
26
hw/riscv/meson.build | 3 ++-
19
4 files changed, 265 insertions(+), 200 deletions(-)
27
5 files changed, 110 insertions(+), 2 deletions(-)
20
create mode 100644 target/riscv/vector_internals.h
28
create mode 100644 hw/riscv/riscv-iommu-hpm.h
21
create mode 100644 target/riscv/vector_internals.c
29
create mode 100644 hw/riscv/riscv-iommu-hpm.c
30
22
31
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
23
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
32
new file mode 100644
24
new file mode 100644
33
index XXXXXXX..XXXXXXX
25
index XXXXXXX..XXXXXXX
34
--- /dev/null
26
--- /dev/null
35
+++ b/hw/riscv/riscv-iommu-hpm.h
27
+++ b/target/riscv/vector_internals.h
36
@@ -XXX,XX +XXX,XX @@
28
@@ -XXX,XX +XXX,XX @@
37
+/*
29
+/*
38
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
30
+ * RISC-V Vector Extension Internals
39
+ *
31
+ *
40
+ * Copyright (C) 2022-2023 Rivos Inc.
32
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
41
+ *
33
+ *
42
+ * This program is free software; you can redistribute it and/or modify it
34
+ * This program is free software; you can redistribute it and/or modify it
43
+ * under the terms and conditions of the GNU General Public License,
35
+ * under the terms and conditions of the GNU General Public License,
44
+ * version 2 or later, as published by the Free Software Foundation.
36
+ * version 2 or later, as published by the Free Software Foundation.
45
+ *
37
+ *
46
+ * This program is distributed in the hope that it will be useful,
38
+ * This program is distributed in the hope it will be useful, but WITHOUT
47
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
39
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
48
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
49
+ * GNU General Public License for more details.
41
+ * more details.
50
+ *
42
+ *
51
+ * You should have received a copy of the GNU General Public License along
43
+ * You should have received a copy of the GNU General Public License along with
52
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
44
+ * this program. If not, see <http://www.gnu.org/licenses/>.
53
+ */
45
+ */
54
+
46
+
55
+#ifndef HW_RISCV_IOMMU_HPM_H
47
+#ifndef TARGET_RISCV_VECTOR_INTERNALS_H
56
+#define HW_RISCV_IOMMU_HPM_H
48
+#define TARGET_RISCV_VECTOR_INTERNALS_H
57
+
49
+
58
+#include "qom/object.h"
50
+#include "qemu/osdep.h"
59
+#include "hw/riscv/riscv-iommu.h"
51
+#include "qemu/bitops.h"
60
+
52
+#include "cpu.h"
61
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
53
+#include "tcg/tcg-gvec-desc.h"
62
+
54
+#include "internals.h"
55
+
56
+static inline uint32_t vext_nf(uint32_t desc)
57
+{
58
+ return FIELD_EX32(simd_data(desc), VDATA, NF);
59
+}
60
+
61
+/*
62
+ * Note that vector data is stored in host-endian 64-bit chunks,
63
+ * so addressing units smaller than that needs a host-endian fixup.
64
+ */
65
+#if HOST_BIG_ENDIAN
66
+#define H1(x) ((x) ^ 7)
67
+#define H1_2(x) ((x) ^ 6)
68
+#define H1_4(x) ((x) ^ 4)
69
+#define H2(x) ((x) ^ 3)
70
+#define H4(x) ((x) ^ 1)
71
+#define H8(x) ((x))
72
+#else
73
+#define H1(x) (x)
74
+#define H1_2(x) (x)
75
+#define H1_4(x) (x)
76
+#define H2(x) (x)
77
+#define H4(x) (x)
78
+#define H8(x) (x)
63
+#endif
79
+#endif
64
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
80
+
81
+/*
82
+ * Encode LMUL to lmul as following:
83
+ * LMUL vlmul lmul
84
+ * 1 000 0
85
+ * 2 001 1
86
+ * 4 010 2
87
+ * 8 011 3
88
+ * - 100 -
89
+ * 1/8 101 -3
90
+ * 1/4 110 -2
91
+ * 1/2 111 -1
92
+ */
93
+static inline int32_t vext_lmul(uint32_t desc)
94
+{
95
+ return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
96
+}
97
+
98
+static inline uint32_t vext_vm(uint32_t desc)
99
+{
100
+ return FIELD_EX32(simd_data(desc), VDATA, VM);
101
+}
102
+
103
+static inline uint32_t vext_vma(uint32_t desc)
104
+{
105
+ return FIELD_EX32(simd_data(desc), VDATA, VMA);
106
+}
107
+
108
+static inline uint32_t vext_vta(uint32_t desc)
109
+{
110
+ return FIELD_EX32(simd_data(desc), VDATA, VTA);
111
+}
112
+
113
+static inline uint32_t vext_vta_all_1s(uint32_t desc)
114
+{
115
+ return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
116
+}
117
+
118
+/*
119
+ * Earlier designs (pre-0.9) had a varying number of bits
120
+ * per mask value (MLEN). In the 0.9 design, MLEN=1.
121
+ * (Section 4.5)
122
+ */
123
+static inline int vext_elem_mask(void *v0, int index)
124
+{
125
+ int idx = index / 64;
126
+ int pos = index % 64;
127
+ return (((uint64_t *)v0)[idx] >> pos) & 1;
128
+}
129
+
130
+/*
131
+ * Get number of total elements, including prestart, body and tail elements.
132
+ * Note that when LMUL < 1, the tail includes the elements past VLMAX that
133
+ * are held in the same vector register.
134
+ */
135
+static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
136
+ uint32_t esz)
137
+{
138
+ uint32_t vlenb = simd_maxsz(desc);
139
+ uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
140
+ int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
141
+ ctzl(esz) - ctzl(sew) + vext_lmul(desc);
142
+ return (vlenb << emul) / esz;
143
+}
144
+
145
+/* set agnostic elements to 1s */
146
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
147
+ uint32_t tot);
148
+
149
+/* expand macro args before macro */
150
+#define RVVCALL(macro, ...) macro(__VA_ARGS__)
151
+
152
+/* (TD, T1, T2, TX1, TX2) */
153
+#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
154
+#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
155
+#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
156
+#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
157
+
158
+/* operation of two vector elements */
159
+typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
160
+
161
+#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
162
+static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
163
+{ \
164
+ TX1 s1 = *((T1 *)vs1 + HS1(i)); \
165
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
166
+ *((TD *)vd + HD(i)) = OP(s2, s1); \
167
+}
168
+
169
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
170
+ CPURISCVState *env, uint32_t desc,
171
+ opivv2_fn *fn, uint32_t esz);
172
+
173
+/* generate the helpers for OPIVV */
174
+#define GEN_VEXT_VV(NAME, ESZ) \
175
+void HELPER(NAME)(void *vd, void *v0, void *vs1, \
176
+ void *vs2, CPURISCVState *env, \
177
+ uint32_t desc) \
178
+{ \
179
+ do_vext_vv(vd, v0, vs1, vs2, env, desc, \
180
+ do_##NAME, ESZ); \
181
+}
182
+
183
+typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
184
+
185
+/*
186
+ * (T1)s1 gives the real operator type.
187
+ * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
188
+ */
189
+#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
190
+static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
191
+{ \
192
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
193
+ *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
194
+}
195
+
196
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
197
+ CPURISCVState *env, uint32_t desc,
198
+ opivx2_fn fn, uint32_t esz);
199
+
200
+/* generate the helpers for OPIVX */
201
+#define GEN_VEXT_VX(NAME, ESZ) \
202
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
203
+ void *vs2, CPURISCVState *env, \
204
+ uint32_t desc) \
205
+{ \
206
+ do_vext_vx(vd, v0, s1, vs2, env, desc, \
207
+ do_##NAME, ESZ); \
208
+}
209
+
210
+#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
211
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
65
index XXXXXXX..XXXXXXX 100644
212
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/riscv/riscv-iommu.h
213
--- a/target/riscv/vector_helper.c
67
+++ b/hw/riscv/riscv-iommu.h
214
+++ b/target/riscv/vector_helper.c
68
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
215
@@ -XXX,XX +XXX,XX @@
69
216
#include "fpu/softfloat.h"
70
QLIST_ENTRY(RISCVIOMMUState) iommus;
217
#include "tcg/tcg-gvec-desc.h"
71
QLIST_HEAD(, RISCVIOMMUSpace) spaces;
218
#include "internals.h"
72
+
219
+#include "vector_internals.h"
73
+ /* HPM cycle counter */
220
#include <math.h>
74
+ uint64_t hpmcycle_val; /* Current value of cycle register */
221
75
+ uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
222
target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
76
};
223
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1,
77
224
return vl;
78
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
225
}
79
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
226
227
-/*
228
- * Note that vector data is stored in host-endian 64-bit chunks,
229
- * so addressing units smaller than that needs a host-endian fixup.
230
- */
231
-#if HOST_BIG_ENDIAN
232
-#define H1(x) ((x) ^ 7)
233
-#define H1_2(x) ((x) ^ 6)
234
-#define H1_4(x) ((x) ^ 4)
235
-#define H2(x) ((x) ^ 3)
236
-#define H4(x) ((x) ^ 1)
237
-#define H8(x) ((x))
238
-#else
239
-#define H1(x) (x)
240
-#define H1_2(x) (x)
241
-#define H1_4(x) (x)
242
-#define H2(x) (x)
243
-#define H4(x) (x)
244
-#define H8(x) (x)
245
-#endif
246
-
247
-static inline uint32_t vext_nf(uint32_t desc)
248
-{
249
- return FIELD_EX32(simd_data(desc), VDATA, NF);
250
-}
251
-
252
-static inline uint32_t vext_vm(uint32_t desc)
253
-{
254
- return FIELD_EX32(simd_data(desc), VDATA, VM);
255
-}
256
-
257
-/*
258
- * Encode LMUL to lmul as following:
259
- * LMUL vlmul lmul
260
- * 1 000 0
261
- * 2 001 1
262
- * 4 010 2
263
- * 8 011 3
264
- * - 100 -
265
- * 1/8 101 -3
266
- * 1/4 110 -2
267
- * 1/2 111 -1
268
- */
269
-static inline int32_t vext_lmul(uint32_t desc)
270
-{
271
- return sextract32(FIELD_EX32(simd_data(desc), VDATA, LMUL), 0, 3);
272
-}
273
-
274
-static inline uint32_t vext_vta(uint32_t desc)
275
-{
276
- return FIELD_EX32(simd_data(desc), VDATA, VTA);
277
-}
278
-
279
-static inline uint32_t vext_vma(uint32_t desc)
280
-{
281
- return FIELD_EX32(simd_data(desc), VDATA, VMA);
282
-}
283
-
284
-static inline uint32_t vext_vta_all_1s(uint32_t desc)
285
-{
286
- return FIELD_EX32(simd_data(desc), VDATA, VTA_ALL_1S);
287
-}
288
-
289
/*
290
* Get the maximum number of elements can be operated.
291
*
292
@@ -XXX,XX +XXX,XX @@ static inline uint32_t vext_max_elems(uint32_t desc, uint32_t log2_esz)
293
return scale < 0 ? vlenb >> -scale : vlenb << scale;
294
}
295
296
-/*
297
- * Get number of total elements, including prestart, body and tail elements.
298
- * Note that when LMUL < 1, the tail includes the elements past VLMAX that
299
- * are held in the same vector register.
300
- */
301
-static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
302
- uint32_t esz)
303
-{
304
- uint32_t vlenb = simd_maxsz(desc);
305
- uint32_t sew = 1 << FIELD_EX64(env->vtype, VTYPE, VSEW);
306
- int8_t emul = ctzl(esz) - ctzl(sew) + vext_lmul(desc) < 0 ? 0 :
307
- ctzl(esz) - ctzl(sew) + vext_lmul(desc);
308
- return (vlenb << emul) / esz;
309
-}
310
-
311
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
312
{
313
return (addr & ~env->cur_pmmask) | env->cur_pmbase;
314
@@ -XXX,XX +XXX,XX @@ static void probe_pages(CPURISCVState *env, target_ulong addr,
315
}
316
}
317
318
-/* set agnostic elements to 1s */
319
-static void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
320
- uint32_t tot)
321
-{
322
- if (is_agnostic == 0) {
323
- /* policy undisturbed */
324
- return;
325
- }
326
- if (tot - cnt == 0) {
327
- return;
328
- }
329
- memset(base + cnt, -1, tot - cnt);
330
-}
331
-
332
static inline void vext_set_elem_mask(void *v0, int index,
333
uint8_t value)
334
{
335
@@ -XXX,XX +XXX,XX @@ static inline void vext_set_elem_mask(void *v0, int index,
336
((uint64_t *)v0)[idx] = deposit64(old, pos, 1, value);
337
}
338
339
-/*
340
- * Earlier designs (pre-0.9) had a varying number of bits
341
- * per mask value (MLEN). In the 0.9 design, MLEN=1.
342
- * (Section 4.5)
343
- */
344
-static inline int vext_elem_mask(void *v0, int index)
345
-{
346
- int idx = index / 64;
347
- int pos = index % 64;
348
- return (((uint64_t *)v0)[idx] >> pos) & 1;
349
-}
350
-
351
/* elements operations for load and store */
352
typedef void vext_ldst_elem_fn(CPURISCVState *env, abi_ptr addr,
353
uint32_t idx, void *vd, uintptr_t retaddr);
354
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
355
* Vector Integer Arithmetic Instructions
356
*/
357
358
-/* expand macro args before macro */
359
-#define RVVCALL(macro, ...) macro(__VA_ARGS__)
360
-
361
/* (TD, T1, T2, TX1, TX2) */
362
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
363
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
364
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
365
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
366
-#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
367
-#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
368
-#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
369
-#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
370
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
371
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
372
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
373
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
374
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
375
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
376
377
-/* operation of two vector elements */
378
-typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
379
-
380
-#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
381
-static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
382
-{ \
383
- TX1 s1 = *((T1 *)vs1 + HS1(i)); \
384
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
385
- *((TD *)vd + HD(i)) = OP(s2, s1); \
386
-}
387
#define DO_SUB(N, M) (N - M)
388
#define DO_RSUB(N, M) (M - N)
389
390
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vsub_vv_h, OP_SSS_H, H2, H2, H2, DO_SUB)
391
RVVCALL(OPIVV2, vsub_vv_w, OP_SSS_W, H4, H4, H4, DO_SUB)
392
RVVCALL(OPIVV2, vsub_vv_d, OP_SSS_D, H8, H8, H8, DO_SUB)
393
394
-static void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
395
- CPURISCVState *env, uint32_t desc,
396
- opivv2_fn *fn, uint32_t esz)
397
-{
398
- uint32_t vm = vext_vm(desc);
399
- uint32_t vl = env->vl;
400
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
401
- uint32_t vta = vext_vta(desc);
402
- uint32_t vma = vext_vma(desc);
403
- uint32_t i;
404
-
405
- for (i = env->vstart; i < vl; i++) {
406
- if (!vm && !vext_elem_mask(v0, i)) {
407
- /* set masked-off elements to 1s */
408
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
409
- continue;
410
- }
411
- fn(vd, vs1, vs2, i);
412
- }
413
- env->vstart = 0;
414
- /* set tail elements to 1s */
415
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
416
-}
417
-
418
-/* generate the helpers for OPIVV */
419
-#define GEN_VEXT_VV(NAME, ESZ) \
420
-void HELPER(NAME)(void *vd, void *v0, void *vs1, \
421
- void *vs2, CPURISCVState *env, \
422
- uint32_t desc) \
423
-{ \
424
- do_vext_vv(vd, v0, vs1, vs2, env, desc, \
425
- do_##NAME, ESZ); \
426
-}
427
-
428
GEN_VEXT_VV(vadd_vv_b, 1)
429
GEN_VEXT_VV(vadd_vv_h, 2)
430
GEN_VEXT_VV(vadd_vv_w, 4)
431
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VV(vsub_vv_h, 2)
432
GEN_VEXT_VV(vsub_vv_w, 4)
433
GEN_VEXT_VV(vsub_vv_d, 8)
434
435
-typedef void opivx2_fn(void *vd, target_long s1, void *vs2, int i);
436
-
437
-/*
438
- * (T1)s1 gives the real operator type.
439
- * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
440
- */
441
-#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
442
-static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
443
-{ \
444
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
445
- *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
446
-}
447
448
RVVCALL(OPIVX2, vadd_vx_b, OP_SSS_B, H1, H1, DO_ADD)
449
RVVCALL(OPIVX2, vadd_vx_h, OP_SSS_H, H2, H2, DO_ADD)
450
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vrsub_vx_h, OP_SSS_H, H2, H2, DO_RSUB)
451
RVVCALL(OPIVX2, vrsub_vx_w, OP_SSS_W, H4, H4, DO_RSUB)
452
RVVCALL(OPIVX2, vrsub_vx_d, OP_SSS_D, H8, H8, DO_RSUB)
453
454
-static void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
455
- CPURISCVState *env, uint32_t desc,
456
- opivx2_fn fn, uint32_t esz)
457
-{
458
- uint32_t vm = vext_vm(desc);
459
- uint32_t vl = env->vl;
460
- uint32_t total_elems = vext_get_total_elems(env, desc, esz);
461
- uint32_t vta = vext_vta(desc);
462
- uint32_t vma = vext_vma(desc);
463
- uint32_t i;
464
-
465
- for (i = env->vstart; i < vl; i++) {
466
- if (!vm && !vext_elem_mask(v0, i)) {
467
- /* set masked-off elements to 1s */
468
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
469
- continue;
470
- }
471
- fn(vd, s1, vs2, i);
472
- }
473
- env->vstart = 0;
474
- /* set tail elements to 1s */
475
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
476
-}
477
-
478
-/* generate the helpers for OPIVX */
479
-#define GEN_VEXT_VX(NAME, ESZ) \
480
-void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
481
- void *vs2, CPURISCVState *env, \
482
- uint32_t desc) \
483
-{ \
484
- do_vext_vx(vd, v0, s1, vs2, env, desc, \
485
- do_##NAME, ESZ); \
486
-}
487
-
488
GEN_VEXT_VX(vadd_vx_b, 1)
489
GEN_VEXT_VX(vadd_vx_h, 2)
490
GEN_VEXT_VX(vadd_vx_w, 4)
491
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
80
new file mode 100644
492
new file mode 100644
81
index XXXXXXX..XXXXXXX
493
index XXXXXXX..XXXXXXX
82
--- /dev/null
494
--- /dev/null
83
+++ b/hw/riscv/riscv-iommu-hpm.c
495
+++ b/target/riscv/vector_internals.c
84
@@ -XXX,XX +XXX,XX @@
496
@@ -XXX,XX +XXX,XX @@
85
+/*
497
+/*
86
+ * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
498
+ * RISC-V Vector Extension Internals
87
+ *
499
+ *
88
+ * Copyright (C) 2022-2023 Rivos Inc.
500
+ * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
89
+ *
501
+ *
90
+ * This program is free software; you can redistribute it and/or modify it
502
+ * This program is free software; you can redistribute it and/or modify it
91
+ * under the terms and conditions of the GNU General Public License,
503
+ * under the terms and conditions of the GNU General Public License,
92
+ * version 2 or later, as published by the Free Software Foundation.
504
+ * version 2 or later, as published by the Free Software Foundation.
93
+ *
505
+ *
94
+ * This program is distributed in the hope that it will be useful,
506
+ * This program is distributed in the hope it will be useful, but WITHOUT
95
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
507
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
96
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
508
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
97
+ * GNU General Public License for more details.
509
+ * more details.
98
+ *
510
+ *
99
+ * You should have received a copy of the GNU General Public License along
511
+ * You should have received a copy of the GNU General Public License along with
100
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
512
+ * this program. If not, see <http://www.gnu.org/licenses/>.
101
+ */
513
+ */
102
+
514
+
103
+#include "qemu/osdep.h"
515
+#include "vector_internals.h"
104
+#include "qemu/timer.h"
516
+
105
+#include "cpu_bits.h"
517
+/* set agnostic elements to 1s */
106
+#include "riscv-iommu-hpm.h"
518
+void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
107
+#include "riscv-iommu.h"
519
+ uint32_t tot)
108
+#include "riscv-iommu-bits.h"
520
+{
109
+#include "trace.h"
521
+ if (is_agnostic == 0) {
110
+
522
+ /* policy undisturbed */
111
+/* For now we assume IOMMU HPM frequency to be 1GHz so 1-cycle is of 1-ns. */
523
+ return;
112
+static inline uint64_t get_cycles(void)
113
+{
114
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
115
+}
116
+
117
+uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
118
+{
119
+ const uint64_t cycle = riscv_iommu_reg_get64(
120
+ s, RISCV_IOMMU_REG_IOHPMCYCLES);
121
+ const uint32_t inhibit = riscv_iommu_reg_get32(
122
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
123
+ const uint64_t ctr_prev = s->hpmcycle_prev;
124
+ const uint64_t ctr_val = s->hpmcycle_val;
125
+
126
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
127
+ /*
128
+ * Counter should not increment if inhibit bit is set. We can't really
129
+ * stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated
130
+ * counter value to indicate that counter was not incremented.
131
+ */
132
+ return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
133
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
134
+ }
524
+ }
135
+
525
+ if (tot - cnt == 0) {
136
+ return (ctr_val + get_cycles() - ctr_prev) |
526
+ return ;
137
+ (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
527
+ }
138
+}
528
+ memset(base + cnt, -1, tot - cnt);
139
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
529
+}
530
+
531
+void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
532
+ CPURISCVState *env, uint32_t desc,
533
+ opivv2_fn *fn, uint32_t esz)
534
+{
535
+ uint32_t vm = vext_vm(desc);
536
+ uint32_t vl = env->vl;
537
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
538
+ uint32_t vta = vext_vta(desc);
539
+ uint32_t vma = vext_vma(desc);
540
+ uint32_t i;
541
+
542
+ for (i = env->vstart; i < vl; i++) {
543
+ if (!vm && !vext_elem_mask(v0, i)) {
544
+ /* set masked-off elements to 1s */
545
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
546
+ continue;
547
+ }
548
+ fn(vd, vs1, vs2, i);
549
+ }
550
+ env->vstart = 0;
551
+ /* set tail elements to 1s */
552
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
553
+}
554
+
555
+void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
556
+ CPURISCVState *env, uint32_t desc,
557
+ opivx2_fn fn, uint32_t esz)
558
+{
559
+ uint32_t vm = vext_vm(desc);
560
+ uint32_t vl = env->vl;
561
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
562
+ uint32_t vta = vext_vta(desc);
563
+ uint32_t vma = vext_vma(desc);
564
+ uint32_t i;
565
+
566
+ for (i = env->vstart; i < vl; i++) {
567
+ if (!vm && !vext_elem_mask(v0, i)) {
568
+ /* set masked-off elements to 1s */
569
+ vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
570
+ continue;
571
+ }
572
+ fn(vd, s1, vs2, i);
573
+ }
574
+ env->vstart = 0;
575
+ /* set tail elements to 1s */
576
+ vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
577
+}
578
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
140
index XXXXXXX..XXXXXXX 100644
579
index XXXXXXX..XXXXXXX 100644
141
--- a/hw/riscv/riscv-iommu.c
580
--- a/target/riscv/meson.build
142
+++ b/hw/riscv/riscv-iommu.c
581
+++ b/target/riscv/meson.build
143
@@ -XXX,XX +XXX,XX @@
582
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
144
#include "cpu_bits.h"
583
'gdbstub.c',
145
#include "riscv-iommu.h"
584
'op_helper.c',
146
#include "riscv-iommu-bits.h"
585
'vector_helper.c',
147
+#include "riscv-iommu-hpm.h"
586
+ 'vector_internals.c',
148
#include "trace.h"
587
'bitmanip_helper.c',
149
588
'translate.c',
150
#define LIMIT_CACHE_CTX (1U << 7)
589
'm128_helper.c',
151
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_read(void *opaque, hwaddr addr,
152
return MEMTX_ACCESS_ERROR;
153
}
154
155
- ptr = &s->regs_rw[addr];
156
+ /* Compute cycle register value. */
157
+ if ((addr & ~7) == RISCV_IOMMU_REG_IOHPMCYCLES) {
158
+ val = riscv_iommu_hpmcycle_read(s);
159
+ ptr = (uint8_t *)&val + (addr & 7);
160
+ } else if ((addr & ~3) == RISCV_IOMMU_REG_IOCOUNTOVF) {
161
+ /*
162
+ * Software can read RISCV_IOMMU_REG_IOCOUNTOVF before timer
163
+ * callback completes. In which case CY_OF bit in
164
+ * RISCV_IOMMU_IOHPMCYCLES_OVF would be 0. Here we take the
165
+ * CY_OF bit state from RISCV_IOMMU_REG_IOHPMCYCLES register as
166
+ * it's not dependent over the timer callback and is computed
167
+ * from cycle overflow.
168
+ */
169
+ val = ldq_le_p(&s->regs_rw[addr]);
170
+ val |= (riscv_iommu_hpmcycle_read(s) & RISCV_IOMMU_IOHPMCYCLES_OVF)
171
+ ? RISCV_IOMMU_IOCOUNTOVF_CY
172
+ : 0;
173
+ ptr = (uint8_t *)&val + (addr & 3);
174
+ } else {
175
+ ptr = &s->regs_rw[addr];
176
+ }
177
+
178
val = ldn_le_p(ptr, size);
179
180
*data = val;
181
diff --git a/hw/riscv/meson.build b/hw/riscv/meson.build
182
index XXXXXXX..XXXXXXX 100644
183
--- a/hw/riscv/meson.build
184
+++ b/hw/riscv/meson.build
185
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(when: 'CONFIG_SIFIVE_U', if_true: files('sifive_u.c'))
186
riscv_ss.add(when: 'CONFIG_SPIKE', if_true: files('spike.c'))
187
riscv_ss.add(when: 'CONFIG_MICROCHIP_PFSOC', if_true: files('microchip_pfsoc.c'))
188
riscv_ss.add(when: 'CONFIG_ACPI', if_true: files('virt-acpi-build.c'))
189
-riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files('riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c'))
190
+riscv_ss.add(when: 'CONFIG_RISCV_IOMMU', if_true: files(
191
+    'riscv-iommu.c', 'riscv-iommu-pci.c', 'riscv-iommu-sys.c', 'riscv-iommu-hpm.c'))
192
riscv_ss.add(when: 'CONFIG_MICROBLAZE_V', if_true: files('microblaze-v-generic.c'))
193
194
hw_arch += {'riscv': riscv_ss}
195
--
590
--
196
2.48.1
591
2.41.0
diff view generated by jsdifflib
1
From: Andrea Bolognani <abologna@redhat.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
This should make no difference from the functional point of
3
Refactor the non SEW-specific stuff out of `GEN_OPIVV_TRANS` into
4
view and it's just preparation for upcoming changes.
4
function `opivv_trans` (similar to `opivi_trans`). `opivv_trans` will be
5
used in proceeding vector-crypto commits.
5
6
6
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
7
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
7
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
9
Message-ID: <20250127182924.103510-2-abologna@redhat.com>
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
Message-ID: <20230711165917.2629866-3-max.chou@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
14
---
12
scripts/qemu-binfmt-conf.sh | 17 ++++++++++-------
15
target/riscv/insn_trans/trans_rvv.c.inc | 62 +++++++++++++------------
13
1 file changed, 10 insertions(+), 7 deletions(-)
16
1 file changed, 32 insertions(+), 30 deletions(-)
14
17
15
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
18
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
16
index XXXXXXX..XXXXXXX 100755
19
index XXXXXXX..XXXXXXX 100644
17
--- a/scripts/qemu-binfmt-conf.sh
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
18
+++ b/scripts/qemu-binfmt-conf.sh
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
19
@@ -XXX,XX +XXX,XX @@ qemu_set_binfmts() {
22
@@ -XXX,XX +XXX,XX @@ GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
20
mask=$(eval echo \$${cpu}_mask)
23
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
21
family=$(eval echo \$${cpu}_family)
24
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
22
25
23
+ target="$cpu"
26
+static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
24
+ if [ "$cpu" = "i486" ] ; then
27
+ gen_helper_gvec_4_ptr *fn, DisasContext *s)
25
+ target="i386"
28
+{
26
+ fi
29
+ uint32_t data = 0;
30
+ TCGLabel *over = gen_new_label();
31
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
27
+
33
+
28
+ qemu="$QEMU_PATH/qemu-$target$QEMU_SUFFIX"
34
+ data = FIELD_DP32(data, VDATA, VM, vm);
35
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
36
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
37
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
38
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
39
+ tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
40
+ vreg_ofs(s, vs2), cpu_env, s->cfg_ptr->vlen / 8,
41
+ s->cfg_ptr->vlen / 8, data, fn);
42
+ mark_vs_dirty(s);
43
+ gen_set_label(over);
44
+ return true;
45
+}
29
+
46
+
30
if [ "$magic" = "" ] || [ "$mask" = "" ] || [ "$family" = "" ] ; then
47
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
31
echo "INTERNAL ERROR: unknown cpu $cpu" 1>&2
48
/* OPIVV without GVEC IR */
32
continue
49
-#define GEN_OPIVV_TRANS(NAME, CHECK) \
33
fi
50
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
34
51
-{ \
35
- qemu="$QEMU_PATH/qemu-$cpu"
52
- if (CHECK(s, a)) { \
36
- if [ "$cpu" = "i486" ] ; then
53
- uint32_t data = 0; \
37
- qemu="$QEMU_PATH/qemu-i386"
54
- static gen_helper_gvec_4_ptr * const fns[4] = { \
38
+ if [ "$host_family" = "$family" ] ; then
55
- gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
39
+ continue
56
- gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
40
fi
57
- }; \
41
58
- TCGLabel *over = gen_new_label(); \
42
- qemu="$qemu$QEMU_SUFFIX"
59
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
43
- if [ "$host_family" != "$family" ] ; then
60
- tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
44
- $BINFMT_SET
61
- \
45
- fi
62
- data = FIELD_DP32(data, VDATA, VM, a->vm); \
46
+ $BINFMT_SET
63
- data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
47
done
64
- data = FIELD_DP32(data, VDATA, VTA, s->vta); \
65
- data = \
66
- FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
67
- data = FIELD_DP32(data, VDATA, VMA, s->vma); \
68
- tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
69
- vreg_ofs(s, a->rs1), \
70
- vreg_ofs(s, a->rs2), cpu_env, \
71
- s->cfg_ptr->vlen / 8, \
72
- s->cfg_ptr->vlen / 8, data, \
73
- fns[s->sew]); \
74
- mark_vs_dirty(s); \
75
- gen_set_label(over); \
76
- return true; \
77
- } \
78
- return false; \
79
+#define GEN_OPIVV_TRANS(NAME, CHECK) \
80
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
81
+{ \
82
+ if (CHECK(s, a)) { \
83
+ static gen_helper_gvec_4_ptr * const fns[4] = { \
84
+ gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
85
+ gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
86
+ }; \
87
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
88
+ } \
89
+ return false; \
48
}
90
}
49
91
92
/*
50
--
93
--
51
2.48.1
94
2.41.0
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
Now that we have every piece in place we can advertise CAP_HTM to
3
Remove the redundant "vl == 0" check which is already included within the vstart >= vl check, when vl == 0.
4
software, allowing any HPM aware driver to make use of the counters.
5
4
6
HPM is enabled/disabled via the 'hpm-counters' attribute. Default value
5
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
7
is 31, max value is also 31. Setting it to zero will disable HPM
6
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
8
support.
7
Signed-off-by: Max Chou <max.chou@sifive.com>
9
10
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20250224190826.1858473-10-dbarboza@ventanamicro.com>
9
Message-ID: <20230711165917.2629866-4-max.chou@sifive.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
11
---
16
hw/riscv/riscv-iommu.c | 21 +++++++++++++++++++++
12
target/riscv/insn_trans/trans_rvv.c.inc | 31 +------------------------
17
1 file changed, 21 insertions(+)
13
1 file changed, 1 insertion(+), 30 deletions(-)
18
14
19
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
15
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
20
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/riscv/riscv-iommu.c
17
--- a/target/riscv/insn_trans/trans_rvv.c.inc
22
+++ b/hw/riscv/riscv-iommu.c
18
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
23
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
19
@@ -XXX,XX +XXX,XX @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
24
RISCV_IOMMU_CAP_SV48X4 | RISCV_IOMMU_CAP_SV57X4;
20
TCGv_i32 desc;
21
22
TCGLabel *over = gen_new_label();
23
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
24
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
25
26
dest = tcg_temp_new_ptr();
27
@@ -XXX,XX +XXX,XX @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
28
TCGv_i32 desc;
29
30
TCGLabel *over = gen_new_label();
31
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
32
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
33
34
dest = tcg_temp_new_ptr();
35
@@ -XXX,XX +XXX,XX @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
36
TCGv_i32 desc;
37
38
TCGLabel *over = gen_new_label();
39
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
40
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
41
42
dest = tcg_temp_new_ptr();
43
@@ -XXX,XX +XXX,XX @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
44
TCGv_i32 desc;
45
46
TCGLabel *over = gen_new_label();
47
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
48
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
49
50
dest = tcg_temp_new_ptr();
51
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
52
return false;
25
}
53
}
26
54
27
+ if (s->hpm_cntrs > 0) {
55
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
28
+ /* Clip number of HPM counters to maximum supported (31). */
56
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
29
+ if (s->hpm_cntrs > RISCV_IOMMU_IOCOUNT_NUM) {
57
30
+ s->hpm_cntrs = RISCV_IOMMU_IOCOUNT_NUM;
58
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
31
+ }
59
@@ -XXX,XX +XXX,XX @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
32
+ /* Enable hardware performance monitor interface */
60
uint32_t data = 0;
33
+ s->cap |= RISCV_IOMMU_CAP_HPM;
61
34
+ }
62
TCGLabel *over = gen_new_label();
35
+
63
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
36
/* Out-of-reset translation mode: OFF (DMA disabled) BARE (passthrough) */
64
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
37
s->ddtp = set_field(0, RISCV_IOMMU_DDTP_MODE, s->enable_off ?
65
38
RISCV_IOMMU_DDTP_MODE_OFF : RISCV_IOMMU_DDTP_MODE_BARE);
66
dest = tcg_temp_new_ptr();
39
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
67
@@ -XXX,XX +XXX,XX @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
40
RISCV_IOMMU_TR_REQ_CTL_GO_BUSY);
68
uint32_t data = 0;
41
}
69
42
70
TCGLabel *over = gen_new_label();
43
+ /* If HPM registers are enabled. */
71
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
44
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
72
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
45
+ /* +1 for cycle counter bit. */
73
46
+ stl_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOCOUNTINH],
74
dest = tcg_temp_new_ptr();
47
+ ~((2 << s->hpm_cntrs) - 1));
75
@@ -XXX,XX +XXX,XX @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
48
+ stq_le_p(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCYCLES], 0);
76
if (checkfn(s, a)) {
49
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMCTR_BASE],
77
uint32_t data = 0;
50
+ 0x00, s->hpm_cntrs * 8);
78
TCGLabel *over = gen_new_label();
51
+ memset(&s->regs_ro[RISCV_IOMMU_REG_IOHPMEVT_BASE],
79
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
52
+ 0x00, s->hpm_cntrs * 8);
80
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
53
+ }
81
54
+
82
data = FIELD_DP32(data, VDATA, VM, a->vm);
55
/* Memory region for downstream access, if specified. */
83
@@ -XXX,XX +XXX,XX @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
56
if (s->target_mr) {
84
if (opiwv_widen_check(s, a)) {
57
s->target_as = g_new0(AddressSpace, 1);
85
uint32_t data = 0;
86
TCGLabel *over = gen_new_label();
87
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
88
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
89
90
data = FIELD_DP32(data, VDATA, VM, a->vm);
91
@@ -XXX,XX +XXX,XX @@ static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
92
{
93
uint32_t data = 0;
94
TCGLabel *over = gen_new_label();
95
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
96
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
97
98
data = FIELD_DP32(data, VDATA, VM, vm);
99
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
100
gen_helper_##NAME##_w, \
101
}; \
102
TCGLabel *over = gen_new_label(); \
103
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
104
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
105
\
106
data = FIELD_DP32(data, VDATA, VM, a->vm); \
107
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
108
gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
109
};
110
TCGLabel *over = gen_new_label();
111
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
112
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
113
114
tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
115
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
116
vext_check_ss(s, a->rd, 0, 1)) {
117
TCGv s1;
118
TCGLabel *over = gen_new_label();
119
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
120
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
121
122
s1 = get_gpr(s, a->rs1, EXT_SIGN);
123
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
124
gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
125
};
126
TCGLabel *over = gen_new_label();
127
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
128
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
129
130
s1 = tcg_constant_i64(simm);
131
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
132
}; \
133
TCGLabel *over = gen_new_label(); \
134
gen_set_rm(s, RISCV_FRM_DYN); \
135
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
136
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
137
\
138
data = FIELD_DP32(data, VDATA, VM, a->vm); \
139
@@ -XXX,XX +XXX,XX @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
140
TCGv_i64 t1;
141
142
TCGLabel *over = gen_new_label();
143
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
144
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
145
146
dest = tcg_temp_new_ptr();
147
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
148
}; \
149
TCGLabel *over = gen_new_label(); \
150
gen_set_rm(s, RISCV_FRM_DYN); \
151
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
152
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);\
153
\
154
data = FIELD_DP32(data, VDATA, VM, a->vm); \
155
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
156
}; \
157
TCGLabel *over = gen_new_label(); \
158
gen_set_rm(s, RISCV_FRM_DYN); \
159
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
160
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
161
\
162
data = FIELD_DP32(data, VDATA, VM, a->vm); \
163
@@ -XXX,XX +XXX,XX @@ static bool do_opfv(DisasContext *s, arg_rmr *a,
164
uint32_t data = 0;
165
TCGLabel *over = gen_new_label();
166
gen_set_rm_chkfrm(s, rm);
167
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
168
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
169
170
data = FIELD_DP32(data, VDATA, VM, a->vm);
171
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
172
gen_helper_vmv_v_x_d,
173
};
174
TCGLabel *over = gen_new_label();
175
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
176
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
177
178
t1 = tcg_temp_new_i64();
179
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
180
}; \
181
TCGLabel *over = gen_new_label(); \
182
gen_set_rm_chkfrm(s, FRM); \
183
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
184
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
185
\
186
data = FIELD_DP32(data, VDATA, VM, a->vm); \
187
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
188
}; \
189
TCGLabel *over = gen_new_label(); \
190
gen_set_rm(s, RISCV_FRM_DYN); \
191
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
192
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
193
\
194
data = FIELD_DP32(data, VDATA, VM, a->vm); \
195
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
196
}; \
197
TCGLabel *over = gen_new_label(); \
198
gen_set_rm_chkfrm(s, FRM); \
199
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
200
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
201
\
202
data = FIELD_DP32(data, VDATA, VM, a->vm); \
203
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
204
}; \
205
TCGLabel *over = gen_new_label(); \
206
gen_set_rm_chkfrm(s, FRM); \
207
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
208
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
209
\
210
data = FIELD_DP32(data, VDATA, VM, a->vm); \
211
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \
212
uint32_t data = 0; \
213
gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
214
TCGLabel *over = gen_new_label(); \
215
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
216
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
217
\
218
data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
219
@@ -XXX,XX +XXX,XX @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
220
require_vm(a->vm, a->rd)) {
221
uint32_t data = 0;
222
TCGLabel *over = gen_new_label();
223
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
224
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
225
226
data = FIELD_DP32(data, VDATA, VM, a->vm);
227
@@ -XXX,XX +XXX,XX @@ static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
228
TCGv s1;
229
TCGLabel *over = gen_new_label();
230
231
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
232
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
233
234
t1 = tcg_temp_new_i64();
235
@@ -XXX,XX +XXX,XX @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
236
TCGv_i64 t1;
237
TCGLabel *over = gen_new_label();
238
239
- /* if vl == 0 or vstart >= vl, skip vector register write back */
240
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
241
+ /* if vstart >= vl, skip vector register write back */
242
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
243
244
/* NaN-box f[rs1] */
245
@@ -XXX,XX +XXX,XX @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
246
uint32_t data = 0;
247
gen_helper_gvec_3_ptr *fn;
248
TCGLabel *over = gen_new_label();
249
- tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
250
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
251
252
static gen_helper_gvec_3_ptr * const fns[6][4] = {
58
--
253
--
59
2.48.1
254
2.41.0
diff view generated by jsdifflib
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
2
2
3
This commit adds logic to records CTR entries of different types
3
This commit adds support for the Zvbc vector-crypto extension, which
4
and adds required hooks in TCG and interrupt/Exception logic to
4
consists of the following instructions:
5
record events.
5
6
6
* vclmulh.[vx,vv]
7
This commit also adds support to invoke freeze CTR logic for breakpoint
7
* vclmul.[vx,vv]
8
exceptions and counter overflow interrupts.
8
9
9
Translation functions are defined in
10
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
11
`target/riscv/vcrypto_helper.c`.
12
Message-ID: <20250205-b4-ctr_upstream_v6-v6-4-439d8e06c8ef@rivosinc.com>
12
13
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
14
Co-authored-by: Max Chou <max.chou@sifive.com>
15
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
16
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
[max.chou@sifive.com: Exposed x-zvbc property]
19
Message-ID: <20230711165917.2629866-5-max.chou@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
21
---
15
target/riscv/cpu.h | 7 +
22
target/riscv/cpu_cfg.h | 1 +
16
target/riscv/helper.h | 1 +
23
target/riscv/helper.h | 6 +++
17
target/riscv/cpu_helper.c | 259 ++++++++++++++++++
24
target/riscv/insn32.decode | 6 +++
18
target/riscv/op_helper.c | 19 ++
25
target/riscv/cpu.c | 9 ++++
19
target/riscv/translate.c | 46 ++++
26
target/riscv/translate.c | 1 +
20
.../riscv/insn_trans/trans_privileged.c.inc | 2 +
27
target/riscv/vcrypto_helper.c | 59 ++++++++++++++++++++++
21
target/riscv/insn_trans/trans_rvi.c.inc | 75 +++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 62 ++++++++++++++++++++++++
22
target/riscv/insn_trans/trans_rvzce.c.inc | 21 ++
29
target/riscv/meson.build | 3 +-
23
8 files changed, 430 insertions(+)
30
8 files changed, 146 insertions(+), 1 deletion(-)
24
31
create mode 100644 target/riscv/vcrypto_helper.c
25
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
32
create mode 100644 target/riscv/insn_trans/trans_rvvk.c.inc
26
index XXXXXXX..XXXXXXX 100644
33
27
--- a/target/riscv/cpu.h
34
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
28
+++ b/target/riscv/cpu.h
35
index XXXXXXX..XXXXXXX 100644
29
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
36
--- a/target/riscv/cpu_cfg.h
30
uint32_t sctrstatus;
37
+++ b/target/riscv/cpu_cfg.h
31
uint64_t vsctrctl;
38
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
32
39
bool ext_zve32f;
33
+ uint64_t ctr_src[16 << SCTRDEPTH_MAX];
40
bool ext_zve64f;
34
+ uint64_t ctr_dst[16 << SCTRDEPTH_MAX];
41
bool ext_zve64d;
35
+ uint64_t ctr_data[16 << SCTRDEPTH_MAX];
42
+ bool ext_zvbc;
36
+
43
bool ext_zmmul;
37
/* Machine and Supervisor interrupt priorities */
44
bool ext_zvfbfmin;
38
uint8_t miprio[64];
45
bool ext_zvfbfwma;
39
uint8_t siprio[64];
40
@@ -XXX,XX +XXX,XX @@ RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
41
42
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
43
44
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
45
+ enum CTRType type, target_ulong prev_priv, bool prev_virt);
46
+
47
void riscv_translate_init(void);
48
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
49
int *max_insns, vaddr pc, void *host_pc);
50
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
46
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
51
index XXXXXXX..XXXXXXX 100644
47
index XXXXXXX..XXXXXXX 100644
52
--- a/target/riscv/helper.h
48
--- a/target/riscv/helper.h
53
+++ b/target/riscv/helper.h
49
+++ b/target/riscv/helper.h
54
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_1(wfi, void, env)
50
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vfwcvtbf16_f_f_v, void, ptr, ptr, ptr, env, i32)
55
DEF_HELPER_1(wrs_nto, void, env)
51
56
DEF_HELPER_1(tlb_flush, void, env)
52
DEF_HELPER_6(vfwmaccbf16_vv, void, ptr, ptr, ptr, ptr, env, i32)
57
DEF_HELPER_1(tlb_flush_all, void, env)
53
DEF_HELPER_6(vfwmaccbf16_vf, void, ptr, ptr, i64, ptr, env, i32)
58
+DEF_HELPER_4(ctr_add_entry, void, env, tl, tl, tl)
54
+
59
/* Native Debug */
55
+/* Vector crypto functions */
60
DEF_HELPER_1(itrigger_match, void, env)
56
+DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
61
#endif
57
+DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
62
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
58
+DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
63
index XXXXXXX..XXXXXXX 100644
59
+DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
64
--- a/target/riscv/cpu_helper.c
60
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
65
+++ b/target/riscv/cpu_helper.c
61
index XXXXXXX..XXXXXXX 100644
66
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
62
--- a/target/riscv/insn32.decode
63
+++ b/target/riscv/insn32.decode
64
@@ -XXX,XX +XXX,XX @@ vfwcvtbf16_f_f_v 010010 . ..... 01101 001 ..... 1010111 @r2_vm
65
# *** Zvfbfwma Standard Extension ***
66
vfwmaccbf16_vv 111011 . ..... ..... 001 ..... 1010111 @r_vm
67
vfwmaccbf16_vf 111011 . ..... ..... 101 ..... 1010111 @r_vm
68
+
69
+# *** Zvbc vector crypto extension ***
70
+vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
71
+vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
72
+vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
73
+vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
74
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
75
index XXXXXXX..XXXXXXX 100644
76
--- a/target/riscv/cpu.c
77
+++ b/target/riscv/cpu.c
78
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
79
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
80
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
81
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
82
+ ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
83
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
84
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
85
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
86
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
87
return;
67
}
88
}
68
}
89
69
90
+ if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
70
+static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
91
+ error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
71
+ bool virt)
72
+{
73
+ uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
74
+
75
+ assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
76
+
77
+ if (ctl & freeze_mask) {
78
+ env->sctrstatus |= SCTRSTATUS_FROZEN;
79
+ }
80
+}
81
+
82
+static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
83
+{
84
+ switch (priv) {
85
+ case PRV_M:
86
+ return MCTRCTL_M;
87
+ case PRV_S:
88
+ if (virt) {
89
+ return XCTRCTL_S;
90
+ }
91
+ return XCTRCTL_S;
92
+ case PRV_U:
93
+ if (virt) {
94
+ return XCTRCTL_U;
95
+ }
96
+ return XCTRCTL_U;
97
+ }
98
+
99
+ g_assert_not_reached();
100
+}
101
+
102
+static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
103
+ bool virt)
104
+{
105
+ switch (priv) {
106
+ case PRV_M:
107
+ return env->mctrctl;
108
+ case PRV_S:
109
+ case PRV_U:
110
+ if (virt) {
111
+ return env->vsctrctl;
112
+ }
113
+ return env->mctrctl;
114
+ }
115
+
116
+ g_assert_not_reached();
117
+}
118
+
119
+/*
120
+ * This function assumes that src privilege and target privilege are not same
121
+ * and src privilege is less than target privilege. This includes the virtual
122
+ * state as well.
123
+ */
124
+static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
125
+ bool src_virt)
126
+{
127
+ target_long tgt_prv = env->priv;
128
+ bool res = true;
129
+
130
+ /*
131
+ * VS and U mode are same in terms of xTE bits required to record an
132
+ * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
133
+ * Requirements. This changes VS to U to simplify the logic a bit.
134
+ */
135
+ if (src_virt && src_prv == PRV_S) {
136
+ src_prv = PRV_U;
137
+ } else if (env->virt_enabled && tgt_prv == PRV_S) {
138
+ tgt_prv = PRV_U;
139
+ }
140
+
141
+ /* VU mode is an outlier here. */
142
+ if (src_virt && src_prv == PRV_U) {
143
+ res &= !!(env->vsctrctl & XCTRCTL_STE);
144
+ }
145
+
146
+ switch (src_prv) {
147
+ case PRV_U:
148
+ if (tgt_prv == PRV_U) {
149
+ break;
150
+ }
151
+ res &= !!(env->mctrctl & XCTRCTL_STE);
152
+ /* fall-through */
153
+ case PRV_S:
154
+ if (tgt_prv == PRV_S) {
155
+ break;
156
+ }
157
+ res &= !!(env->mctrctl & MCTRCTL_MTE);
158
+ /* fall-through */
159
+ case PRV_M:
160
+ break;
161
+ }
162
+
163
+ return res;
164
+}
165
+
166
+/*
167
+ * Special cases for traps and trap returns:
168
+ *
169
+ * 1- Traps, and trap returns, between enabled modes are recorded as normal.
170
+ * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
171
+ * enabled mode back to an inhibited mode, are partially recorded. In such
172
+ * cases, the PC from the inhibited mode (source PC for traps, and target PC
173
+ * for trap returns) is 0.
174
+ *
175
+ * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
176
+ * Traps from an enabled mode to an inhibited mode, known as external traps,
177
+ * receive special handling.
178
+ * By default external traps are not recorded, but a handshake mechanism exists
179
+ * to allow partial recording. Software running in the target mode of the trap
180
+ * can opt-in to allowing CTR to record traps into that mode even when the mode
181
+ * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
182
+ * respectively, to opt-in. When an External Trap occurs, and xTE=1, such that
183
+ * x is the target privilege mode of the trap, will CTR record the trap. In such
184
+ * cases, the target PC is 0.
185
+ */
186
+/*
187
+ * CTR arrays are implemented as circular buffers and new entry is stored at
188
+ * sctrstatus.WRPTR, but they are presented to software as moving circular
189
+ * buffers. Which means, software get's the illusion that whenever a new entry
190
+ * is added the whole buffer is moved by one place and the new entry is added at
191
+ * the start keeping new entry at idx 0 and older ones follow.
192
+ *
193
+ * Depth = 16.
194
+ *
195
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
196
+ * WRPTR W
197
+ * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
198
+ *
199
+ * When a new entry is added:
200
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
201
+ * WRPTR W
202
+ * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
203
+ *
204
+ * entry here denotes the logical entry number that software can access
205
+ * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
206
+ * will return entry 0 i-e buffer[8] and 0x201 will return entry 1 i-e
207
+ * buffer[7]. Here is how we convert entry to buffer idx.
208
+ *
209
+ * entry = isel - CTR_ENTRIES_FIRST;
210
+ * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
211
+ */
212
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
213
+ enum CTRType type, target_ulong src_priv, bool src_virt)
214
+{
215
+ bool tgt_virt = env->virt_enabled;
216
+ uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
217
+ uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
218
+ uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
219
+ uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
220
+ uint64_t depth, head;
221
+ bool ext_trap = false;
222
+
223
+ /*
224
+ * Return immediately if both target and src recording is disabled or if
225
+ * CTR is in frozen state.
226
+ */
227
+ if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
228
+ env->sctrstatus & SCTRSTATUS_FROZEN) {
229
+ return;
92
+ return;
230
+ }
93
+ }
231
+
94
+
232
+ /*
95
if (cpu->cfg.ext_zk) {
233
+ * With RAS Emul enabled, only allow Indirect, direct calls, Function
96
cpu->cfg.ext_zkn = true;
234
+ * returns and Co-routine swap types.
97
cpu->cfg.ext_zkr = true;
235
+ */
98
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
236
+ if (tgt_ctrl & XCTRCTL_RASEMU &&
99
DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
237
+ type != CTRDATA_TYPE_INDIRECT_CALL &&
100
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
238
+ type != CTRDATA_TYPE_DIRECT_CALL &&
101
239
+ type != CTRDATA_TYPE_RETURN &&
102
+ /* Vector cryptography extensions */
240
+ type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
103
+ DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
241
+ return;
104
+
242
+ }
105
DEFINE_PROP_END_OF_LIST(),
243
+
106
};
244
+ if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
107
245
+ /* Case 2 for traps. */
246
+ if (!(src_ctrl & src_mask)) {
247
+ src = 0;
248
+ } else if (!(tgt_ctrl & tgt_mask)) {
249
+ /* Check if target priv-mode has allowed external trap recording. */
250
+ if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
251
+ return;
252
+ }
253
+
254
+ ext_trap = true;
255
+ dst = 0;
256
+ }
257
+ } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
258
+ /*
259
+ * Case 3 for trap returns. Trap returns from inhibited mode are not
260
+ * recorded.
261
+ */
262
+ if (!(src_ctrl & src_mask)) {
263
+ return;
264
+ }
265
+
266
+ /* Case 2 for trap returns. */
267
+ if (!(tgt_ctrl & tgt_mask)) {
268
+ dst = 0;
269
+ }
270
+ }
271
+
272
+ /* Ignore filters in case of RASEMU mode or External trap. */
273
+ if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
274
+ /*
275
+ * Check if the specific type is inhibited. Not taken branch filter is
276
+ * an enable bit and needs to be checked separatly.
277
+ */
278
+ bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
279
+ if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
280
+ (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
281
+ return;
282
+ }
283
+ }
284
+
285
+ head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
286
+
287
+ depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
288
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
289
+ head = (head - 1) & (depth - 1);
290
+
291
+ env->ctr_src[head] &= ~CTRSOURCE_VALID;
292
+ env->sctrstatus =
293
+ set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
294
+ return;
295
+ }
296
+
297
+ /* In case of Co-routine SWAP we overwrite latest entry. */
298
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
299
+ head = (head - 1) & (depth - 1);
300
+ }
301
+
302
+ env->ctr_src[head] = src | CTRSOURCE_VALID;
303
+ env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
304
+ env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
305
+
306
+ head = (head + 1) & (depth - 1);
307
+
308
+ env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
309
+}
310
+
311
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
312
{
313
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
314
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
315
!(env->mip & (1ULL << cause));
316
bool smode_double_trap = false;
317
uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
318
+ const bool prev_virt = env->virt_enabled;
319
+ const target_ulong prev_priv = env->priv;
320
target_ulong tval = 0;
321
target_ulong tinst = 0;
322
target_ulong htval = 0;
323
target_ulong mtval2 = 0;
324
+ target_ulong src;
325
int sxlen = 0;
326
int mxlen = 16 << riscv_cpu_mxl(env);
327
bool nnmi_excep = false;
328
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
329
env->pc = (env->stvec >> 2 << 2) +
330
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
331
riscv_cpu_set_mode(env, PRV_S, virt);
332
+
333
+ src = env->sepc;
334
} else {
335
/*
336
* If the hart encounters an exception while executing in M-mode
337
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
338
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
339
}
340
riscv_cpu_set_mode(env, PRV_M, virt);
341
+ src = env->mepc;
342
+ }
343
+
344
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
345
+ if (async && cause == IRQ_PMU_OVF) {
346
+ riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
347
+ } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
348
+ riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
349
+ }
350
+
351
+ riscv_ctr_add_entry(env, src, env->pc,
352
+ async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
353
+ prev_priv, prev_virt);
354
}
355
356
/*
357
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/riscv/op_helper.c
360
+++ b/target/riscv/op_helper.c
361
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
362
{
363
uint64_t mstatus;
364
target_ulong prev_priv, prev_virt = env->virt_enabled;
365
+ const target_ulong src_priv = env->priv;
366
+ const bool src_virt = env->virt_enabled;
367
368
if (!(env->priv >= PRV_S)) {
369
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
370
@@ -XXX,XX +XXX,XX @@ target_ulong helper_sret(CPURISCVState *env)
371
}
372
env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, 0);
373
374
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
375
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
376
+ src_priv, src_virt);
377
+ }
378
+
379
return retpc;
380
}
381
382
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mret(CPURISCVState *env)
383
}
384
env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, 0);
385
386
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
387
+ riscv_ctr_add_entry(env, env->pc, retpc, CTRDATA_TYPE_EXCEP_INT_RET,
388
+ PRV_M, false);
389
+ }
390
+
391
return retpc;
392
}
393
394
@@ -XXX,XX +XXX,XX @@ target_ulong helper_mnret(CPURISCVState *env)
395
return retpc;
396
}
397
398
+void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
399
+ target_ulong dest, target_ulong type)
400
+{
401
+ riscv_ctr_add_entry(env, src, dest, (enum CTRType)type,
402
+ env->priv, env->virt_enabled);
403
+}
404
+
405
void helper_wfi(CPURISCVState *env)
406
{
407
CPUState *cs = env_cpu(env);
408
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
108
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
409
index XXXXXXX..XXXXXXX 100644
109
index XXXXXXX..XXXXXXX 100644
410
--- a/target/riscv/translate.c
110
--- a/target/riscv/translate.c
411
+++ b/target/riscv/translate.c
111
+++ b/target/riscv/translate.c
412
@@ -XXX,XX +XXX,XX @@ static void gen_set_fpr_d(DisasContext *ctx, int reg_num, TCGv_i64 t)
112
@@ -XXX,XX +XXX,XX @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
413
}
113
#include "insn_trans/trans_rvzfa.c.inc"
414
}
114
#include "insn_trans/trans_rvzfh.c.inc"
415
115
#include "insn_trans/trans_rvk.c.inc"
416
+#ifndef CONFIG_USER_ONLY
116
+#include "insn_trans/trans_rvvk.c.inc"
117
#include "insn_trans/trans_privileged.c.inc"
118
#include "insn_trans/trans_svinval.c.inc"
119
#include "insn_trans/trans_rvbf16.c.inc"
120
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
121
new file mode 100644
122
index XXXXXXX..XXXXXXX
123
--- /dev/null
124
+++ b/target/riscv/vcrypto_helper.c
125
@@ -XXX,XX +XXX,XX @@
417
+/*
126
+/*
418
+ * Direct calls
127
+ * RISC-V Vector Crypto Extension Helpers for QEMU.
419
+ * - jal x1;
128
+ *
420
+ * - jal x5;
129
+ * Copyright (C) 2023 SiFive, Inc.
421
+ * - c.jal.
130
+ * Written by Codethink Ltd and SiFive.
422
+ * - cm.jalt.
131
+ *
423
+ *
132
+ * This program is free software; you can redistribute it and/or modify it
424
+ * Direct jumps
133
+ * under the terms and conditions of the GNU General Public License,
425
+ * - jal x0;
134
+ * version 2 or later, as published by the Free Software Foundation.
426
+ * - c.j;
135
+ *
427
+ * - cm.jt.
136
+ * This program is distributed in the hope it will be useful, but WITHOUT
428
+ *
137
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
429
+ * Other direct jumps
138
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
430
+ * - jal rd where rd != x1 and rd != x5 and rd != x0;
139
+ * more details.
140
+ *
141
+ * You should have received a copy of the GNU General Public License along with
142
+ * this program. If not, see <http://www.gnu.org/licenses/>.
431
+ */
143
+ */
432
+static void gen_ctr_jal(DisasContext *ctx, int rd, target_ulong imm)
144
+
433
+{
145
+#include "qemu/osdep.h"
434
+ TCGv dest = tcg_temp_new();
146
+#include "qemu/host-utils.h"
435
+ TCGv src = tcg_temp_new();
147
+#include "qemu/bitops.h"
436
+ TCGv type;
148
+#include "cpu.h"
437
+
149
+#include "exec/memop.h"
438
+ /*
150
+#include "exec/exec-all.h"
439
+ * If rd is x1 or x5 link registers, treat this as direct call otherwise
151
+#include "exec/helper-proto.h"
440
+ * its a direct jump.
152
+#include "internals.h"
441
+ */
153
+#include "vector_internals.h"
442
+ if (rd == 1 || rd == 5) {
154
+
443
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
155
+static uint64_t clmul64(uint64_t y, uint64_t x)
444
+ } else if (rd == 0) {
156
+{
445
+ type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
157
+ uint64_t result = 0;
446
+ } else {
158
+ for (int j = 63; j >= 0; j--) {
447
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_DIRECT_JUMP);
159
+ if ((y >> j) & 1) {
448
+ }
160
+ result ^= (x << j);
449
+
161
+ }
450
+ gen_pc_plus_diff(dest, ctx, imm);
162
+ }
451
+ gen_pc_plus_diff(src, ctx, 0);
163
+ return result;
452
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
164
+}
453
+}
165
+
454
+#endif
166
+static uint64_t clmulh64(uint64_t y, uint64_t x)
455
+
167
+{
456
static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
168
+ uint64_t result = 0;
457
{
169
+ for (int j = 63; j >= 1; j--) {
458
TCGv succ_pc = dest_gpr(ctx, rd);
170
+ if ((y >> j) & 1) {
459
@@ -XXX,XX +XXX,XX @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
171
+ result ^= (x >> (64 - j));
460
}
172
+ }
461
}
173
+ }
462
174
+ return result;
463
+#ifndef CONFIG_USER_ONLY
175
+}
464
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
176
+
465
+ gen_ctr_jal(ctx, rd, imm);
177
+RVVCALL(OPIVV2, vclmul_vv, OP_UUU_D, H8, H8, H8, clmul64)
466
+ }
178
+GEN_VEXT_VV(vclmul_vv, 8)
467
+#endif
179
+RVVCALL(OPIVX2, vclmul_vx, OP_UUU_D, H8, H8, clmul64)
468
+
180
+GEN_VEXT_VX(vclmul_vx, 8)
469
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
181
+RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
470
gen_set_gpr(ctx, rd, succ_pc);
182
+GEN_VEXT_VV(vclmulh_vv, 8)
471
183
+RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
472
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
184
+GEN_VEXT_VX(vclmulh_vx, 8)
473
index XXXXXXX..XXXXXXX 100644
185
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
474
--- a/target/riscv/insn_trans/trans_privileged.c.inc
186
new file mode 100644
475
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
187
index XXXXXXX..XXXXXXX
476
@@ -XXX,XX +XXX,XX @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
188
--- /dev/null
477
if (has_ext(ctx, RVS)) {
189
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
478
decode_save_opc(ctx, 0);
190
@@ -XXX,XX +XXX,XX @@
479
translator_io_start(&ctx->base);
480
+ gen_update_pc(ctx, 0);
481
gen_helper_sret(cpu_pc, tcg_env);
482
exit_tb(ctx); /* no chaining */
483
ctx->base.is_jmp = DISAS_NORETURN;
484
@@ -XXX,XX +XXX,XX @@ static bool trans_mret(DisasContext *ctx, arg_mret *a)
485
#ifndef CONFIG_USER_ONLY
486
decode_save_opc(ctx, 0);
487
translator_io_start(&ctx->base);
488
+ gen_update_pc(ctx, 0);
489
gen_helper_mret(cpu_pc, tcg_env);
490
exit_tb(ctx); /* no chaining */
491
ctx->base.is_jmp = DISAS_NORETURN;
492
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
493
index XXXXXXX..XXXXXXX 100644
494
--- a/target/riscv/insn_trans/trans_rvi.c.inc
495
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
496
@@ -XXX,XX +XXX,XX @@ static bool trans_jal(DisasContext *ctx, arg_jal *a)
497
return true;
498
}
499
500
+#ifndef CONFIG_USER_ONLY
501
+/*
191
+/*
502
+ * Indirect calls
192
+ * RISC-V translation routines for the vector crypto extension.
503
+ * - jalr x1, rs where rs != x5;
193
+ *
504
+ * - jalr x5, rs where rs != x1;
194
+ * Copyright (C) 2023 SiFive, Inc.
505
+ * - c.jalr rs1 where rs1 != x5;
195
+ * Written by Codethink Ltd and SiFive.
506
+ *
196
+ *
507
+ * Indirect jumps
197
+ * This program is free software; you can redistribute it and/or modify it
508
+ * - jalr x0, rs where rs != x1 and rs != x5;
198
+ * under the terms and conditions of the GNU General Public License,
509
+ * - c.jr rs1 where rs1 != x1 and rs1 != x5.
199
+ * version 2 or later, as published by the Free Software Foundation.
510
+ *
200
+ *
511
+ * Returns
201
+ * This program is distributed in the hope it will be useful, but WITHOUT
512
+ * - jalr rd, rs where (rs == x1 or rs == x5) and rd != x1 and rd != x5;
202
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
513
+ * - c.jr rs1 where rs1 == x1 or rs1 == x5.
203
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
514
+ *
204
+ * more details.
515
+ * Co-routine swap
205
+ *
516
+ * - jalr x1, x5;
206
+ * You should have received a copy of the GNU General Public License along with
517
+ * - jalr x5, x1;
207
+ * this program. If not, see <http://www.gnu.org/licenses/>.
518
+ * - c.jalr x5.
519
+ *
520
+ * Other indirect jumps
521
+ * - jalr rd, rs where rs != x1, rs != x5, rd != x0, rd != x1 and rd != x5.
522
+ */
208
+ */
523
+static void gen_ctr_jalr(DisasContext *ctx, arg_jalr *a, TCGv dest)
209
+
524
+{
210
+/*
525
+ TCGv src = tcg_temp_new();
211
+ * Zvbc
526
+ TCGv type;
212
+ */
527
+
213
+
528
+ if ((a->rd == 1 && a->rs1 != 5) || (a->rd == 5 && a->rs1 != 1)) {
214
+#define GEN_VV_MASKED_TRANS(NAME, CHECK) \
529
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_CALL);
215
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
530
+ } else if (a->rd == 0 && a->rs1 != 1 && a->rs1 != 5) {
216
+ { \
531
+ type = tcg_constant_tl(CTRDATA_TYPE_INDIRECT_JUMP);
217
+ if (CHECK(s, a)) { \
532
+ } else if ((a->rs1 == 1 || a->rs1 == 5) && (a->rd != 1 && a->rd != 5)) {
218
+ return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, \
533
+ type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
219
+ gen_helper_##NAME, s); \
534
+ } else if ((a->rs1 == 1 && a->rd == 5) || (a->rs1 == 5 && a->rd == 1)) {
220
+ } \
535
+ type = tcg_constant_tl(CTRDATA_TYPE_CO_ROUTINE_SWAP);
221
+ return false; \
536
+ } else {
222
+ }
537
+ type = tcg_constant_tl(CTRDATA_TYPE_OTHER_INDIRECT_JUMP);
223
+
538
+ }
224
+static bool vclmul_vv_check(DisasContext *s, arg_rmrr *a)
539
+
225
+{
540
+ gen_pc_plus_diff(src, ctx, 0);
226
+ return opivv_check(s, a) &&
541
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
227
+ s->cfg_ptr->ext_zvbc == true &&
542
+}
228
+ s->sew == MO_64;
543
+#endif
229
+}
544
+
230
+
545
static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
231
+GEN_VV_MASKED_TRANS(vclmul_vv, vclmul_vv_check)
546
{
232
+GEN_VV_MASKED_TRANS(vclmulh_vv, vclmul_vv_check)
547
TCGLabel *misaligned = NULL;
233
+
548
@@ -XXX,XX +XXX,XX @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
234
+#define GEN_VX_MASKED_TRANS(NAME, CHECK) \
549
gen_pc_plus_diff(succ_pc, ctx, ctx->cur_insn_len);
235
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
550
gen_set_gpr(ctx, a->rd, succ_pc);
236
+ { \
551
237
+ if (CHECK(s, a)) { \
552
+#ifndef CONFIG_USER_ONLY
238
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, \
553
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
239
+ gen_helper_##NAME, s); \
554
+ gen_ctr_jalr(ctx, a, target_pc);
240
+ } \
555
+ }
241
+ return false; \
556
+#endif
242
+ }
557
+
243
+
558
tcg_gen_mov_tl(cpu_pc, target_pc);
244
+static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
559
if (ctx->fcfi_enabled) {
245
+{
560
/*
246
+ return opivx_check(s, a) &&
561
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
247
+ s->cfg_ptr->ext_zvbc == true &&
562
} else {
248
+ s->sew == MO_64;
563
tcg_gen_brcond_tl(cond, src1, src2, l);
249
+}
564
}
250
+
565
+
251
+GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
566
+#ifndef CONFIG_USER_ONLY
252
+GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
567
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
253
diff --git a/target/riscv/meson.build b/target/riscv/meson.build
568
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_NONTAKEN_BRANCH);
254
index XXXXXXX..XXXXXXX 100644
569
+ TCGv dest = tcg_temp_new();
255
--- a/target/riscv/meson.build
570
+ TCGv src = tcg_temp_new();
256
+++ b/target/riscv/meson.build
571
+
257
@@ -XXX,XX +XXX,XX @@ riscv_ss.add(files(
572
+ gen_pc_plus_diff(src, ctx, 0);
258
'translate.c',
573
+ gen_pc_plus_diff(dest, ctx, ctx->cur_insn_len);
259
'm128_helper.c',
574
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
260
'crypto_helper.c',
575
+ }
261
- 'zce_helper.c'
576
+#endif
262
+ 'zce_helper.c',
577
+
263
+ 'vcrypto_helper.c'
578
gen_goto_tb(ctx, 1, ctx->cur_insn_len);
264
))
579
ctx->pc_save = orig_pc_save;
265
riscv_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'), if_false: files('kvm-stub.c'))
580
266
581
@@ -XXX,XX +XXX,XX @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
582
gen_pc_plus_diff(target_pc, ctx, a->imm);
583
gen_exception_inst_addr_mis(ctx, target_pc);
584
} else {
585
+#ifndef CONFIG_USER_ONLY
586
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
587
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_TAKEN_BRANCH);
588
+ TCGv dest = tcg_temp_new();
589
+ TCGv src = tcg_temp_new();
590
+
591
+ gen_pc_plus_diff(src, ctx, 0);
592
+ gen_pc_plus_diff(dest, ctx, a->imm);
593
+ gen_helper_ctr_add_entry(tcg_env, src, dest, type);
594
+ }
595
+#endif
596
gen_goto_tb(ctx, 0, a->imm);
597
}
598
ctx->pc_save = -1;
599
diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc
600
index XXXXXXX..XXXXXXX 100644
601
--- a/target/riscv/insn_trans/trans_rvzce.c.inc
602
+++ b/target/riscv/insn_trans/trans_rvzce.c.inc
603
@@ -XXX,XX +XXX,XX @@ static bool gen_pop(DisasContext *ctx, arg_cmpp *a, bool ret, bool ret_val)
604
605
if (ret) {
606
TCGv ret_addr = get_gpr(ctx, xRA, EXT_SIGN);
607
+#ifndef CONFIG_USER_ONLY
608
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
609
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_RETURN);
610
+ TCGv src = tcg_temp_new();
611
+ gen_pc_plus_diff(src, ctx, 0);
612
+ gen_helper_ctr_add_entry(tcg_env, src, ret_addr, type);
613
+ }
614
+#endif
615
tcg_gen_mov_tl(cpu_pc, ret_addr);
616
tcg_gen_lookup_and_goto_ptr();
617
ctx->base.is_jmp = DISAS_NORETURN;
618
@@ -XXX,XX +XXX,XX @@ static bool trans_cm_jalt(DisasContext *ctx, arg_cm_jalt *a)
619
gen_set_gpr(ctx, xRA, succ_pc);
620
}
621
622
+#ifndef CONFIG_USER_ONLY
623
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
624
+ if (a->index >= 32) {
625
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_CALL);
626
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
627
+ } else {
628
+ TCGv type = tcg_constant_tl(CTRDATA_TYPE_DIRECT_JUMP);
629
+ gen_helper_ctr_add_entry(tcg_env, cpu_pc, addr, type);
630
+ }
631
+ }
632
+#endif
633
+
634
+
635
tcg_gen_mov_tl(cpu_pc, addr);
636
637
tcg_gen_lookup_and_goto_ptr();
638
--
267
--
639
2.48.1
268
2.41.0
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
This function will increment a specific counter, generating an interrupt
3
Move the checks out of `do_opiv{v,x,i}_gvec{,_shift}` functions
4
when an overflow occurs.
4
and into the corresponding macros. This enables the functions to be
5
reused in proceeding commits without check duplication.
5
6
6
Some extra changes in riscv-iommu.c were required to add this new
7
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
7
helper in riscv-iommu-hpm.c:
8
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
8
9
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
9
- RISCVIOMMUContext was moved to riscv-iommu.h, making it visible in
10
Signed-off-by: Max Chou <max.chou@sifive.com>
10
riscv-iommu-hpm.c;
11
Message-ID: <20230711165917.2629866-6-max.chou@sifive.com>
11
12
- riscv_iommu_notify() is now public.
13
14
No behavior change is made since HPM support is not being advertised
15
yet.
16
17
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
18
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
Acked-by: Alistair Francis <alistair.francis@wdc.com>
20
Message-ID: <20250224190826.1858473-5-dbarboza@ventanamicro.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
22
---
13
---
23
hw/riscv/riscv-iommu-hpm.h | 2 +
14
target/riscv/insn_trans/trans_rvv.c.inc | 28 +++++++++++--------------
24
hw/riscv/riscv-iommu.h | 18 ++++++
15
1 file changed, 12 insertions(+), 16 deletions(-)
25
hw/riscv/riscv-iommu-hpm.c | 114 +++++++++++++++++++++++++++++++++++++
26
hw/riscv/riscv-iommu.c | 43 +++++++++-----
27
4 files changed, 162 insertions(+), 15 deletions(-)
28
16
29
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
17
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
30
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu-hpm.h
19
--- a/target/riscv/insn_trans/trans_rvv.c.inc
32
+++ b/hw/riscv/riscv-iommu-hpm.h
20
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
33
@@ -XXX,XX +XXX,XX @@
21
@@ -XXX,XX +XXX,XX @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
34
#include "hw/riscv/riscv-iommu.h"
22
gen_helper_gvec_4_ptr *fn)
35
23
{
36
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
24
TCGLabel *over = gen_new_label();
37
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
25
- if (!opivv_check(s, a)) {
38
+ unsigned event_id);
26
- return false;
39
27
- }
40
#endif
28
41
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
29
tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
42
index XXXXXXX..XXXXXXX 100644
30
43
--- a/hw/riscv/riscv-iommu.h
31
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
44
+++ b/hw/riscv/riscv-iommu.h
32
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
45
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
33
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
46
/* HPM cycle counter */
34
}; \
47
uint64_t hpmcycle_val; /* Current value of cycle register */
35
+ if (!opivv_check(s, a)) { \
48
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
36
+ return false; \
49
+
37
+ } \
50
+ /* HPM event counters */
38
return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
51
+ GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
52
};
53
54
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
55
Error **errp);
56
void riscv_iommu_set_cap_igs(RISCVIOMMUState *s, riscv_iommu_igs_mode mode);
57
void riscv_iommu_reset(RISCVIOMMUState *s);
58
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type);
59
+
60
+typedef struct RISCVIOMMUContext RISCVIOMMUContext;
61
+/* Device translation context state. */
62
+struct RISCVIOMMUContext {
63
+ uint64_t devid:24; /* Requester Id, AKA device_id */
64
+ uint64_t process_id:20; /* Process ID. PASID for PCIe */
65
+ uint64_t tc; /* Translation Control */
66
+ uint64_t ta; /* Translation Attributes */
67
+ uint64_t satp; /* S-Stage address translation and protection */
68
+ uint64_t gatp; /* G-Stage address translation and protection */
69
+ uint64_t msi_addr_mask; /* MSI filtering - address mask */
70
+ uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
71
+ uint64_t msiptp; /* MSI redirection page table pointer */
72
+};
73
74
/* private helpers */
75
76
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
77
index XXXXXXX..XXXXXXX 100644
78
--- a/hw/riscv/riscv-iommu-hpm.c
79
+++ b/hw/riscv/riscv-iommu-hpm.c
80
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
81
return (ctr_val + get_cycles() - ctr_prev) |
82
(cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
83
}
39
}
84
+
40
85
+static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
41
@@ -XXX,XX +XXX,XX @@ static inline bool
86
+{
42
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
87
+ const uint32_t off = ctr_idx << 3;
43
gen_helper_opivx *fn)
88
+ uint64_t cntr_val;
44
{
89
+
45
- if (!opivx_check(s, a)) {
90
+ cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
46
- return false;
91
+ stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
47
- }
92
+
93
+ /* Handle the overflow scenario. */
94
+ if (cntr_val == UINT64_MAX) {
95
+ /*
96
+ * Generate interrupt only if OF bit is clear. +1 to offset the cycle
97
+ * register OF bit.
98
+ */
99
+ const uint32_t ovf =
100
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
101
+ BIT(ctr_idx + 1), 0);
102
+ if (!get_field(ovf, BIT(ctr_idx + 1))) {
103
+ riscv_iommu_reg_mod64(s,
104
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
105
+ RISCV_IOMMU_IOHPMEVT_OF,
106
+ 0);
107
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
108
+ }
109
+ }
110
+}
111
+
112
+void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
113
+ unsigned event_id)
114
+{
115
+ const uint32_t inhibit = riscv_iommu_reg_get32(
116
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
117
+ uint32_t did_gscid;
118
+ uint32_t pid_pscid;
119
+ uint32_t ctr_idx;
120
+ gpointer value;
121
+ uint32_t ctrs;
122
+ uint64_t evt;
123
+
124
+ if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
125
+ return;
126
+ }
127
+
128
+ value = g_hash_table_lookup(s->hpm_event_ctr_map,
129
+ GUINT_TO_POINTER(event_id));
130
+ if (value == NULL) {
131
+ return;
132
+ }
133
+
134
+ for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
135
+ ctr_idx = ctz32(ctrs);
136
+ if (get_field(inhibit, BIT(ctr_idx + 1))) {
137
+ continue;
138
+ }
139
+
140
+ evt = riscv_iommu_reg_get64(s,
141
+ RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));
142
+
143
+ /*
144
+ * It's quite possible that event ID has been changed in counter
145
+ * but hashtable hasn't been updated yet. We don't want to increment
146
+ * counter for the old event ID.
147
+ */
148
+ if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
149
+ continue;
150
+ }
151
+
152
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
153
+ did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
154
+ pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
155
+ } else {
156
+ did_gscid = ctx->devid;
157
+ pid_pscid = ctx->process_id;
158
+ }
159
+
160
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
161
+ /*
162
+ * If the transaction does not have a valid process_id, counter
163
+ * increments if device_id matches DID_GSCID. If the transaction
164
+ * has a valid process_id, counter increments if device_id
165
+ * matches DID_GSCID and process_id matches PID_PSCID. See
166
+ * IOMMU Specification, Chapter 5.23. Performance-monitoring
167
+ * event selector.
168
+ */
169
+ if (ctx->process_id &&
170
+ get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
171
+ continue;
172
+ }
173
+ }
174
+
175
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
176
+ uint32_t mask = ~0;
177
+
178
+ if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
179
+ /*
180
+ * 1001 1011 mask = GSCID
181
+ * 0000 0111 mask = mask ^ (mask + 1)
182
+ * 1111 1000 mask = ~mask;
183
+ */
184
+ mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
185
+ mask = mask ^ (mask + 1);
186
+ mask = ~mask;
187
+ }
188
+
189
+ if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
190
+ (did_gscid & mask)) {
191
+ continue;
192
+ }
193
+ }
194
+
195
+ hpm_incr_ctr(s, ctr_idx);
196
+ }
197
+}
198
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
199
index XXXXXXX..XXXXXXX 100644
200
--- a/hw/riscv/riscv-iommu.c
201
+++ b/hw/riscv/riscv-iommu.c
202
@@ -XXX,XX +XXX,XX @@
203
#define PPN_PHYS(ppn) ((ppn) << TARGET_PAGE_BITS)
204
#define PPN_DOWN(phy) ((phy) >> TARGET_PAGE_BITS)
205
206
-typedef struct RISCVIOMMUContext RISCVIOMMUContext;
207
typedef struct RISCVIOMMUEntry RISCVIOMMUEntry;
208
209
/* Device assigned I/O address space */
210
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUSpace {
211
QLIST_ENTRY(RISCVIOMMUSpace) list;
212
};
213
214
-/* Device translation context state. */
215
-struct RISCVIOMMUContext {
216
- uint64_t devid:24; /* Requester Id, AKA device_id */
217
- uint64_t process_id:20; /* Process ID. PASID for PCIe */
218
- uint64_t tc; /* Translation Control */
219
- uint64_t ta; /* Translation Attributes */
220
- uint64_t satp; /* S-Stage address translation and protection */
221
- uint64_t gatp; /* G-Stage address translation and protection */
222
- uint64_t msi_addr_mask; /* MSI filtering - address mask */
223
- uint64_t msi_addr_pattern; /* MSI filtering - address pattern */
224
- uint64_t msiptp; /* MSI redirection page table pointer */
225
-};
226
-
48
-
227
typedef enum RISCVIOMMUTransTag {
49
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
228
RISCV_IOMMU_TRANS_TAG_BY, /* Bypass */
50
TCGv_i64 src1 = tcg_temp_new_i64();
229
RISCV_IOMMU_TRANS_TAG_SS, /* Single Stage */
51
230
@@ -XXX,XX +XXX,XX @@ static uint8_t riscv_iommu_get_icvec_vector(uint32_t icvec, uint32_t vec_type)
52
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
231
}
53
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
54
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
55
}; \
56
+ if (!opivx_check(s, a)) { \
57
+ return false; \
58
+ } \
59
return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
232
}
60
}
233
61
234
-static void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
62
@@ -XXX,XX +XXX,XX @@ static inline bool
235
+void riscv_iommu_notify(RISCVIOMMUState *s, int vec_type)
63
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
64
gen_helper_opivx *fn, imm_mode_t imm_mode)
236
{
65
{
237
uint32_t ipsr, icvec, vector;
66
- if (!opivx_check(s, a)) {
238
67
- return false;
239
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_spa_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
68
- }
240
}
69
-
241
}
70
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
242
71
gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
243
+
72
extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
244
+ if (pass == S_STAGE) {
73
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
245
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_S_VS_WALKS);
74
gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
246
+ } else {
75
gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
247
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_G_WALKS);
76
}; \
248
+ }
77
+ if (!opivx_check(s, a)) { \
249
+
78
+ return false; \
250
/* Read page table entry */
79
+ } \
251
if (sc[pass].ptesize == 4) {
80
return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
252
uint32_t pte32 = 0;
81
fns[s->sew], IMM_MODE); \
253
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
254
255
/* Device directory tree walk */
256
for (; depth-- > 0; ) {
257
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
258
/*
259
* Select device id index bits based on device directory tree level
260
* and device context format.
261
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
262
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_DDTE_PPN));
263
}
264
265
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_DD_WALK);
266
+
267
/* index into device context entry page */
268
addr |= (ctx->devid * dc_len) & ~TARGET_PAGE_MASK;
269
270
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
271
}
272
273
for (depth = mode - RISCV_IOMMU_DC_FSC_PDTP_MODE_PD8; depth-- > 0; ) {
274
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
275
+
276
/*
277
* Select process id index bits based on process directory tree
278
* level. See IOMMU Specification, 2.2. Process-Directory-Table.
279
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx)
280
addr = PPN_PHYS(get_field(de, RISCV_IOMMU_PC_FSC_PPN));
281
}
282
283
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_PD_WALK);
284
+
285
/* Leaf entry in PDT */
286
addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK;
287
if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2,
288
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
289
GHashTable *iot_cache;
290
int fault;
291
292
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_URQ);
293
+
294
iot_cache = g_hash_table_ref(s->iot_cache);
295
/*
296
* TC[32] is reserved for custom extensions, used here to temporarily
297
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
298
299
/* Check for ATS request. */
300
if (iotlb->perm == IOMMU_NONE) {
301
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_ATS_RQ);
302
/* Check if ATS is disabled. */
303
if (!(ctx->tc & RISCV_IOMMU_DC_TC_EN_ATS)) {
304
enable_pri = false;
305
@@ -XXX,XX +XXX,XX @@ static int riscv_iommu_translate(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
306
goto done;
307
}
308
309
+ riscv_iommu_hpm_incr_ctr(s, ctx, RISCV_IOMMU_HPMEVENT_TLB_MISS);
310
+
311
/* Translate using device directory / page table information. */
312
fault = riscv_iommu_spa_fetch(s, ctx, iotlb);
313
314
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
315
memory_region_init_io(&s->trap_mr, OBJECT(dev), &riscv_iommu_trap_ops, s,
316
"riscv-iommu-trap", ~0ULL);
317
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
318
+
319
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
320
+ s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
321
+ }
322
}
82
}
323
83
@@ -XXX,XX +XXX,XX @@ static inline bool
324
static void riscv_iommu_unrealize(DeviceState *dev)
84
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
325
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_unrealize(DeviceState *dev)
85
gen_helper_opivx *fn)
326
86
{
327
g_hash_table_unref(s->iot_cache);
87
- if (!opivx_check(s, a)) {
328
g_hash_table_unref(s->ctx_cache);
88
- return false;
329
+
89
- }
330
+ if (s->cap & RISCV_IOMMU_CAP_HPM) {
90
-
331
+ g_hash_table_unref(s->hpm_event_ctr_map);
91
if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
332
+ }
92
TCGv_i32 src1 = tcg_temp_new_i32();
93
94
@@ -XXX,XX +XXX,XX @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
95
gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
96
gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
97
}; \
98
- \
99
+ if (!opivx_check(s, a)) { \
100
+ return false; \
101
+ } \
102
return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
333
}
103
}
334
104
335
void riscv_iommu_reset(RISCVIOMMUState *s)
336
--
105
--
337
2.48.1
106
2.41.0
diff view generated by jsdifflib
1
From: Rodrigo Dias Correa <r@drigo.nl>
1
From: Dickon Hood <dickon.hood@codethink.co.uk>
2
2
3
Instead of migrating the raw tick_offset, goldfish_rtc migrates a
3
Zvbb (implemented in later commit) has a widening instruction, which
4
recalculated value based on QEMU_CLOCK_VIRTUAL. As QEMU_CLOCK_VIRTUAL
4
requires an extra check on the enabled extensions. Refactor
5
stands still across a save-and-restore cycle, the guest RTC becomes out
5
GEN_OPIVX_WIDEN_TRANS() to take a check function to avoid reimplementing
6
of sync with the host RTC when the VM is restored.
6
it.
7
7
8
As described in the bug description, it looks like this calculation was
8
Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
9
copied from pl031 RTC, which had its tick_offset migration fixed by
9
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Commit 032cfe6a79c8 ("pl031: Correctly migrate state when using -rtc
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
clock=host").
11
Signed-off-by: Max Chou <max.chou@sifive.com>
12
12
Message-ID: <20230711165917.2629866-7-max.chou@sifive.com>
13
Migrate the tick_offset directly, adding it as a version-dependent field
14
to VMState. Keep the old behavior when migrating from previous versions.
15
16
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2033
17
Signed-off-by: Rodrigo Dias Correa <r@drigo.nl>
18
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
19
Message-ID: <20250114212150.228241-1-r@drigo.nl>
20
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
---
14
---
22
hw/rtc/goldfish_rtc.c | 43 +++++++++++++------------------------------
15
target/riscv/insn_trans/trans_rvv.c.inc | 52 +++++++++++--------------
23
1 file changed, 13 insertions(+), 30 deletions(-)
16
1 file changed, 23 insertions(+), 29 deletions(-)
24
17
25
diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c
18
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
26
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
27
--- a/hw/rtc/goldfish_rtc.c
20
--- a/target/riscv/insn_trans/trans_rvv.c.inc
28
+++ b/hw/rtc/goldfish_rtc.c
21
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
29
@@ -XXX,XX +XXX,XX @@ static void goldfish_rtc_write(void *opaque, hwaddr offset,
22
@@ -XXX,XX +XXX,XX @@ static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
30
trace_goldfish_rtc_write(offset, value);
23
vext_check_ds(s, a->rd, a->rs2, a->vm);
31
}
24
}
32
25
33
-static int goldfish_rtc_pre_save(void *opaque)
26
-static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
27
- gen_helper_opivx *fn)
34
-{
28
-{
35
- uint64_t delta;
29
- if (opivx_widen_check(s, a)) {
36
- GoldfishRTCState *s = opaque;
30
- return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
37
-
31
- }
38
- /*
32
- return false;
39
- * We want to migrate this offset, which sounds straightforward.
40
- * Unfortunately, we cannot directly pass tick_offset because
41
- * rtc_clock on destination Host might not be same source Host.
42
- *
43
- * To tackle, this we pass tick_offset relative to vm_clock from
44
- * source Host and make it relative to rtc_clock at destination Host.
45
- */
46
- delta = qemu_clock_get_ns(rtc_clock) -
47
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
48
- s->tick_offset_vmstate = s->tick_offset + delta;
49
-
50
- return 0;
51
-}
33
-}
52
-
34
-
53
static int goldfish_rtc_post_load(void *opaque, int version_id)
35
-#define GEN_OPIVX_WIDEN_TRANS(NAME) \
54
{
36
-static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
55
- uint64_t delta;
37
-{ \
56
GoldfishRTCState *s = opaque;
38
- static gen_helper_opivx * const fns[3] = { \
57
39
- gen_helper_##NAME##_b, \
58
- /*
40
- gen_helper_##NAME##_h, \
59
- * We extract tick_offset from tick_offset_vmstate by doing
41
- gen_helper_##NAME##_w \
60
- * reverse math compared to pre_save() function.
42
- }; \
61
- */
43
- return do_opivx_widen(s, a, fns[s->sew]); \
62
- delta = qemu_clock_get_ns(rtc_clock) -
44
+#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK) \
63
- qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
45
+static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
64
- s->tick_offset = s->tick_offset_vmstate - delta;
46
+{ \
65
+ if (version_id < 3) {
47
+ if (CHECK(s, a)) { \
66
+ /*
48
+ static gen_helper_opivx * const fns[3] = { \
67
+ * Previous versions didn't migrate tick_offset directly. Instead, they
49
+ gen_helper_##NAME##_b, \
68
+ * migrated tick_offset_vmstate, which is a recalculation based on
50
+ gen_helper_##NAME##_h, \
69
+ * QEMU_CLOCK_VIRTUAL. We use tick_offset_vmstate when migrating from
51
+ gen_helper_##NAME##_w \
70
+ * older versions.
52
+ }; \
71
+ */
53
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
72
+ uint64_t delta = qemu_clock_get_ns(rtc_clock) -
54
+ } \
73
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
55
+ return false; \
74
+ s->tick_offset = s->tick_offset_vmstate - delta;
56
}
75
+ }
57
76
58
-GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
77
goldfish_rtc_set_alarm(s);
59
-GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
78
60
-GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
79
@@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps goldfish_rtc_ops[2] = {
61
-GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
80
62
+GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
81
static const VMStateDescription goldfish_rtc_vmstate = {
63
+GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
82
.name = TYPE_GOLDFISH_RTC,
64
+GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
83
- .version_id = 2,
65
+GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)
84
- .pre_save = goldfish_rtc_pre_save,
66
85
+ .version_id = 3,
67
/* WIDEN OPIVV with WIDEN */
86
.post_load = goldfish_rtc_post_load,
68
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
87
.fields = (const VMStateField[]) {
69
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vrem_vx, opivx_check)
88
VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState),
70
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
89
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription goldfish_rtc_vmstate = {
71
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
90
VMSTATE_UINT32(irq_pending, GoldfishRTCState),
72
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
91
VMSTATE_UINT32(irq_enabled, GoldfishRTCState),
73
-GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
92
VMSTATE_UINT32(time_high, GoldfishRTCState),
74
-GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
93
+ VMSTATE_UINT64_V(tick_offset, GoldfishRTCState, 3),
75
-GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
94
VMSTATE_END_OF_LIST()
76
+GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
95
}
77
+GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
96
};
78
+GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)
79
80
/* Vector Single-Width Integer Multiply-Add Instructions */
81
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
82
@@ -XXX,XX +XXX,XX @@ GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
83
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
84
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
85
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
86
-GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
87
-GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
88
-GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
89
-GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
90
+GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
91
+GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
92
+GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
93
+GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)
94
95
/* Vector Integer Merge and Move Instructions */
96
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
97
--
97
--
98
2.48.1
98
2.41.0
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
To support hpm events mmio writes, done via
3
Move some macros out of `vector_helper` and into `vector_internals`.
4
riscv_iommu_process_hpmevt_write(), we're also adding the 'hpm-counters'
4
This ensures they can be used by both vector and vector-crypto helpers
5
IOMMU property that are used to determine the amount of counters
5
(latter implemented in proceeding commits).
6
available in the IOMMU.
7
6
8
Note that everything we did so far didn't change any IOMMU behavior
7
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
9
because we're still not advertising HPM capability to software. This
8
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
10
will be done in the next patch.
9
Signed-off-by: Max Chou <max.chou@sifive.com>
11
10
Message-ID: <20230711165917.2629866-8-max.chou@sifive.com>
12
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
13
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Acked-by: Alistair Francis <alistair.francis@wdc.com>
15
Message-ID: <20250224190826.1858473-9-dbarboza@ventanamicro.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
12
---
18
hw/riscv/riscv-iommu-hpm.h | 1 +
13
target/riscv/vector_internals.h | 46 +++++++++++++++++++++++++++++++++
19
hw/riscv/riscv-iommu.h | 1 +
14
target/riscv/vector_helper.c | 42 ------------------------------
20
hw/riscv/riscv-iommu-hpm.c | 88 ++++++++++++++++++++++++++++++++++++++
15
2 files changed, 46 insertions(+), 42 deletions(-)
21
hw/riscv/riscv-iommu.c | 4 +-
22
4 files changed, 93 insertions(+), 1 deletion(-)
23
16
24
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
17
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
25
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
26
--- a/hw/riscv/riscv-iommu-hpm.h
19
--- a/target/riscv/vector_internals.h
27
+++ b/hw/riscv/riscv-iommu-hpm.h
20
+++ b/target/riscv/vector_internals.h
28
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
21
@@ -XXX,XX +XXX,XX @@ void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
29
void riscv_iommu_hpm_timer_cb(void *priv);
22
/* expand macro args before macro */
30
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
23
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
31
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
24
32
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg);
25
+/* (TD, T2, TX2) */
33
26
+#define OP_UU_B uint8_t, uint8_t, uint8_t
34
#endif
27
+#define OP_UU_H uint16_t, uint16_t, uint16_t
35
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
28
+#define OP_UU_W uint32_t, uint32_t, uint32_t
36
index XXXXXXX..XXXXXXX 100644
29
+#define OP_UU_D uint64_t, uint64_t, uint64_t
37
--- a/hw/riscv/riscv-iommu.h
38
+++ b/hw/riscv/riscv-iommu.h
39
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
40
41
/* HPM event counters */
42
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
43
+ uint8_t hpm_cntrs;
44
};
45
46
void riscv_iommu_pci_setup_iommu(RISCVIOMMUState *iommu, PCIBus *bus,
47
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
48
index XXXXXXX..XXXXXXX 100644
49
--- a/hw/riscv/riscv-iommu-hpm.c
50
+++ b/hw/riscv/riscv-iommu-hpm.c
51
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
52
s->hpmcycle_prev = get_cycles();
53
hpm_setup_timer(s, s->hpmcycle_val);
54
}
55
+
30
+
56
+static inline bool check_valid_event_id(unsigned event_id)
31
/* (TD, T1, T2, TX1, TX2) */
57
+{
32
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
58
+ return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
33
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
59
+ event_id < RISCV_IOMMU_HPMEVENT_MAX;
34
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
35
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
36
37
+#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
38
+static void do_##NAME(void *vd, void *vs2, int i) \
39
+{ \
40
+ TX2 s2 = *((T2 *)vs2 + HS2(i)); \
41
+ *((TD *)vd + HD(i)) = OP(s2); \
60
+}
42
+}
61
+
43
+
62
+static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
44
+#define GEN_VEXT_V(NAME, ESZ) \
63
+{
45
+void HELPER(NAME)(void *vd, void *v0, void *vs2, \
64
+ uint32_t *pair = udata;
46
+ CPURISCVState *env, uint32_t desc) \
65
+
47
+{ \
66
+ if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
48
+ uint32_t vm = vext_vm(desc); \
67
+ pair[1] = GPOINTER_TO_UINT(key);
49
+ uint32_t vl = env->vl; \
68
+ return true;
50
+ uint32_t total_elems = \
69
+ }
51
+ vext_get_total_elems(env, desc, ESZ); \
70
+
52
+ uint32_t vta = vext_vta(desc); \
71
+ return false;
53
+ uint32_t vma = vext_vma(desc); \
54
+ uint32_t i; \
55
+ \
56
+ for (i = env->vstart; i < vl; i++) { \
57
+ if (!vm && !vext_elem_mask(v0, i)) { \
58
+ /* set masked-off elements to 1s */ \
59
+ vext_set_elems_1s(vd, vma, i * ESZ, \
60
+ (i + 1) * ESZ); \
61
+ continue; \
62
+ } \
63
+ do_##NAME(vd, vs2, i); \
64
+ } \
65
+ env->vstart = 0; \
66
+ /* set tail elements to 1s */ \
67
+ vext_set_elems_1s(vd, vta, vl * ESZ, \
68
+ total_elems * ESZ); \
72
+}
69
+}
73
+
70
+
74
+/* Caller must check ctr_idx against hpm_ctrs to see if its supported or not. */
71
/* operation of two vector elements */
75
+static void update_event_map(RISCVIOMMUState *s, uint64_t value,
72
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
76
+ uint32_t ctr_idx)
73
77
+{
74
@@ -XXX,XX +XXX,XX @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
78
+ unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
75
do_##NAME, ESZ); \
79
+ uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
76
}
80
+ uint32_t new_value = 1 << ctr_idx;
77
81
+ gpointer data;
78
+/* Three of the widening shortening macros: */
79
+/* (TD, T1, T2, TX1, TX2) */
80
+#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
81
+#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
82
+#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
82
+
83
+
83
+ /*
84
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
84
+ * If EventID field is RISCV_IOMMU_HPMEVENT_INVALID
85
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
85
+ * remove the current mapping.
86
+ */
87
+ if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
88
+ data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);
89
+
90
+ new_value = GPOINTER_TO_UINT(data) & ~(new_value);
91
+ if (new_value != 0) {
92
+ g_hash_table_replace(s->hpm_event_ctr_map,
93
+ GUINT_TO_POINTER(pair[1]),
94
+ GUINT_TO_POINTER(new_value));
95
+ } else {
96
+ g_hash_table_remove(s->hpm_event_ctr_map,
97
+ GUINT_TO_POINTER(pair[1]));
98
+ }
99
+
100
+ return;
101
+ }
102
+
103
+ /* Update the counter mask if the event is already enabled. */
104
+ if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
105
+ GUINT_TO_POINTER(event_id),
106
+ NULL,
107
+ &data)) {
108
+ new_value |= GPOINTER_TO_UINT(data);
109
+ }
110
+
111
+ g_hash_table_insert(s->hpm_event_ctr_map,
112
+ GUINT_TO_POINTER(event_id),
113
+ GUINT_TO_POINTER(new_value));
114
+}
115
+
116
+void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
117
+{
118
+ const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
119
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
120
+ uint64_t val = riscv_iommu_reg_get64(s, evt_reg);
121
+
122
+ if (ctr_idx >= s->hpm_cntrs) {
123
+ return;
124
+ }
125
+
126
+ /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
127
+ if (get_field(ovf, BIT(ctr_idx + 1)) &&
128
+ !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
129
+ /* +1 to offset CYCLE register OF bit. */
130
+ riscv_iommu_reg_mod32(
131
+ s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
132
+ }
133
+
134
+ if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
135
+ /* Reset EventID (WARL) field to invalid. */
136
+ val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
137
+ RISCV_IOMMU_HPMEVENT_INVALID);
138
+ riscv_iommu_reg_set64(s, evt_reg, val);
139
+ }
140
+
141
+ update_event_map(s, val, ctr_idx);
142
+}
143
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
144
index XXXXXXX..XXXXXXX 100644
86
index XXXXXXX..XXXXXXX 100644
145
--- a/hw/riscv/riscv-iommu.c
87
--- a/target/riscv/vector_helper.c
146
+++ b/hw/riscv/riscv-iommu.c
88
+++ b/target/riscv/vector_helper.c
147
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
89
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
148
90
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
149
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
91
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
150
RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
92
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
151
- /* not yet implemented */
93
-#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
152
+ riscv_iommu_process_hpmevt_write(s, regb & ~7);
94
-#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
153
break;
95
-#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
154
}
96
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
155
}
97
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
156
@@ -XXX,XX +XXX,XX @@ static const Property riscv_iommu_properties[] = {
98
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
157
DEFINE_PROP_BOOL("g-stage", RISCVIOMMUState, enable_g_stage, TRUE),
99
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_VF(vfwnmsac_vf_h, 4)
158
DEFINE_PROP_LINK("downstream-mr", RISCVIOMMUState, target_mr,
100
GEN_VEXT_VF(vfwnmsac_vf_w, 8)
159
TYPE_MEMORY_REGION, MemoryRegion *),
101
160
+ DEFINE_PROP_UINT8("hpm-counters", RISCVIOMMUState, hpm_cntrs,
102
/* Vector Floating-Point Square-Root Instruction */
161
+ RISCV_IOMMU_IOCOUNT_NUM),
103
-/* (TD, T2, TX2) */
162
};
104
-#define OP_UU_H uint16_t, uint16_t, uint16_t
163
105
-#define OP_UU_W uint32_t, uint32_t, uint32_t
164
static void riscv_iommu_class_init(ObjectClass *klass, void* data)
106
-#define OP_UU_D uint64_t, uint64_t, uint64_t
107
-
108
#define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
109
static void do_##NAME(void *vd, void *vs2, int i, \
110
CPURISCVState *env) \
111
@@ -XXX,XX +XXX,XX @@ GEN_VEXT_CMP_VF(vmfge_vf_w, uint32_t, H4, vmfge32)
112
GEN_VEXT_CMP_VF(vmfge_vf_d, uint64_t, H8, vmfge64)
113
114
/* Vector Floating-Point Classify Instruction */
115
-#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
116
-static void do_##NAME(void *vd, void *vs2, int i) \
117
-{ \
118
- TX2 s2 = *((T2 *)vs2 + HS2(i)); \
119
- *((TD *)vd + HD(i)) = OP(s2); \
120
-}
121
-
122
-#define GEN_VEXT_V(NAME, ESZ) \
123
-void HELPER(NAME)(void *vd, void *v0, void *vs2, \
124
- CPURISCVState *env, uint32_t desc) \
125
-{ \
126
- uint32_t vm = vext_vm(desc); \
127
- uint32_t vl = env->vl; \
128
- uint32_t total_elems = \
129
- vext_get_total_elems(env, desc, ESZ); \
130
- uint32_t vta = vext_vta(desc); \
131
- uint32_t vma = vext_vma(desc); \
132
- uint32_t i; \
133
- \
134
- for (i = env->vstart; i < vl; i++) { \
135
- if (!vm && !vext_elem_mask(v0, i)) { \
136
- /* set masked-off elements to 1s */ \
137
- vext_set_elems_1s(vd, vma, i * ESZ, \
138
- (i + 1) * ESZ); \
139
- continue; \
140
- } \
141
- do_##NAME(vd, vs2, i); \
142
- } \
143
- env->vstart = 0; \
144
- /* set tail elements to 1s */ \
145
- vext_set_elems_1s(vd, vta, vl * ESZ, \
146
- total_elems * ESZ); \
147
-}
148
-
149
target_ulong fclass_h(uint64_t frs1)
150
{
151
float16 f = frs1;
165
--
152
--
166
2.48.1
153
2.41.0
diff view generated by jsdifflib
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
From: Dickon Hood <dickon.hood@codethink.co.uk>
2
2
3
This commit adds support for [m|s|vs]ctrcontrol, sctrstatus and
3
This commit adds support for the Zvbb vector-crypto extension, which
4
sctrdepth CSRs handling.
4
consists of the following instructions:
5
5
6
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
6
* vrol.[vv,vx]
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
* vror.[vv,vx,vi]
8
Message-ID: <20250205-b4-ctr_upstream_v6-v6-3-439d8e06c8ef@rivosinc.com>
8
* vbrev8.v
9
* vrev8.v
10
* vandn.[vv,vx]
11
* vbrev.v
12
* vclz.v
13
* vctz.v
14
* vcpop.v
15
* vwsll.[vv,vx,vi]
16
17
Translation functions are defined in
18
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
19
`target/riscv/vcrypto_helper.c`.
20
21
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
22
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
23
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
24
[max.chou@sifive.com: Fix imm mode of vror.vi]
25
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
26
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
27
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
28
Signed-off-by: Dickon Hood <dickon.hood@codethink.co.uk>
29
Signed-off-by: Max Chou <max.chou@sifive.com>
30
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
[max.chou@sifive.com: Exposed x-zvbb property]
32
Message-ID: <20230711165917.2629866-9-max.chou@sifive.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
34
---
11
target/riscv/cpu.h | 5 ++
35
target/riscv/cpu_cfg.h | 1 +
12
target/riscv/cpu_cfg.h | 2 +
36
target/riscv/helper.h | 62 +++++++++
13
target/riscv/csr.c | 144 +++++++++++++++++++++++++++++++++++++++++
37
target/riscv/insn32.decode | 20 +++
14
3 files changed, 151 insertions(+)
38
target/riscv/cpu.c | 12 ++
39
target/riscv/vcrypto_helper.c | 138 +++++++++++++++++++
40
target/riscv/insn_trans/trans_rvvk.c.inc | 164 +++++++++++++++++++++++
41
6 files changed, 397 insertions(+)
15
42
16
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
17
index XXXXXXX..XXXXXXX 100644
18
--- a/target/riscv/cpu.h
19
+++ b/target/riscv/cpu.h
20
@@ -XXX,XX +XXX,XX @@ struct CPUArchState {
21
target_ulong mcause;
22
target_ulong mtval; /* since: priv-1.10.0 */
23
24
+ uint64_t mctrctl;
25
+ uint32_t sctrdepth;
26
+ uint32_t sctrstatus;
27
+ uint64_t vsctrctl;
28
+
29
/* Machine and Supervisor interrupt priorities */
30
uint8_t miprio[64];
31
uint8_t siprio[64];
32
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
43
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
33
index XXXXXXX..XXXXXXX 100644
44
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_cfg.h
45
--- a/target/riscv/cpu_cfg.h
35
+++ b/target/riscv/cpu_cfg.h
46
+++ b/target/riscv/cpu_cfg.h
36
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
47
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
37
bool ext_zvfhmin;
48
bool ext_zve32f;
38
bool ext_smaia;
49
bool ext_zve64f;
39
bool ext_ssaia;
50
bool ext_zve64d;
40
+ bool ext_smctr;
51
+ bool ext_zvbb;
41
+ bool ext_ssctr;
52
bool ext_zvbc;
42
bool ext_sscofpmf;
53
bool ext_zmmul;
43
bool ext_smepmp;
54
bool ext_zvfbfmin;
44
bool ext_smrnmi;
55
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
45
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
46
index XXXXXXX..XXXXXXX 100644
56
index XXXXXXX..XXXXXXX 100644
47
--- a/target/riscv/csr.c
57
--- a/target/riscv/helper.h
48
+++ b/target/riscv/csr.c
58
+++ b/target/riscv/helper.h
49
@@ -XXX,XX +XXX,XX @@ static RISCVException hgatp(CPURISCVState *env, int csrno)
59
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vclmul_vv, void, ptr, ptr, ptr, ptr, env, i32)
50
return hmode(env, csrno);
60
DEF_HELPER_6(vclmul_vx, void, ptr, ptr, tl, ptr, env, i32)
51
}
61
DEF_HELPER_6(vclmulh_vv, void, ptr, ptr, ptr, ptr, env, i32)
52
62
DEF_HELPER_6(vclmulh_vx, void, ptr, ptr, tl, ptr, env, i32)
63
+
64
+DEF_HELPER_6(vror_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
65
+DEF_HELPER_6(vror_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
66
+DEF_HELPER_6(vror_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
67
+DEF_HELPER_6(vror_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
68
+
69
+DEF_HELPER_6(vror_vx_b, void, ptr, ptr, tl, ptr, env, i32)
70
+DEF_HELPER_6(vror_vx_h, void, ptr, ptr, tl, ptr, env, i32)
71
+DEF_HELPER_6(vror_vx_w, void, ptr, ptr, tl, ptr, env, i32)
72
+DEF_HELPER_6(vror_vx_d, void, ptr, ptr, tl, ptr, env, i32)
73
+
74
+DEF_HELPER_6(vrol_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
75
+DEF_HELPER_6(vrol_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
76
+DEF_HELPER_6(vrol_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
77
+DEF_HELPER_6(vrol_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
78
+
79
+DEF_HELPER_6(vrol_vx_b, void, ptr, ptr, tl, ptr, env, i32)
80
+DEF_HELPER_6(vrol_vx_h, void, ptr, ptr, tl, ptr, env, i32)
81
+DEF_HELPER_6(vrol_vx_w, void, ptr, ptr, tl, ptr, env, i32)
82
+DEF_HELPER_6(vrol_vx_d, void, ptr, ptr, tl, ptr, env, i32)
83
+
84
+DEF_HELPER_5(vrev8_v_b, void, ptr, ptr, ptr, env, i32)
85
+DEF_HELPER_5(vrev8_v_h, void, ptr, ptr, ptr, env, i32)
86
+DEF_HELPER_5(vrev8_v_w, void, ptr, ptr, ptr, env, i32)
87
+DEF_HELPER_5(vrev8_v_d, void, ptr, ptr, ptr, env, i32)
88
+DEF_HELPER_5(vbrev8_v_b, void, ptr, ptr, ptr, env, i32)
89
+DEF_HELPER_5(vbrev8_v_h, void, ptr, ptr, ptr, env, i32)
90
+DEF_HELPER_5(vbrev8_v_w, void, ptr, ptr, ptr, env, i32)
91
+DEF_HELPER_5(vbrev8_v_d, void, ptr, ptr, ptr, env, i32)
92
+DEF_HELPER_5(vbrev_v_b, void, ptr, ptr, ptr, env, i32)
93
+DEF_HELPER_5(vbrev_v_h, void, ptr, ptr, ptr, env, i32)
94
+DEF_HELPER_5(vbrev_v_w, void, ptr, ptr, ptr, env, i32)
95
+DEF_HELPER_5(vbrev_v_d, void, ptr, ptr, ptr, env, i32)
96
+
97
+DEF_HELPER_5(vclz_v_b, void, ptr, ptr, ptr, env, i32)
98
+DEF_HELPER_5(vclz_v_h, void, ptr, ptr, ptr, env, i32)
99
+DEF_HELPER_5(vclz_v_w, void, ptr, ptr, ptr, env, i32)
100
+DEF_HELPER_5(vclz_v_d, void, ptr, ptr, ptr, env, i32)
101
+DEF_HELPER_5(vctz_v_b, void, ptr, ptr, ptr, env, i32)
102
+DEF_HELPER_5(vctz_v_h, void, ptr, ptr, ptr, env, i32)
103
+DEF_HELPER_5(vctz_v_w, void, ptr, ptr, ptr, env, i32)
104
+DEF_HELPER_5(vctz_v_d, void, ptr, ptr, ptr, env, i32)
105
+DEF_HELPER_5(vcpop_v_b, void, ptr, ptr, ptr, env, i32)
106
+DEF_HELPER_5(vcpop_v_h, void, ptr, ptr, ptr, env, i32)
107
+DEF_HELPER_5(vcpop_v_w, void, ptr, ptr, ptr, env, i32)
108
+DEF_HELPER_5(vcpop_v_d, void, ptr, ptr, ptr, env, i32)
109
+
110
+DEF_HELPER_6(vwsll_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
111
+DEF_HELPER_6(vwsll_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
112
+DEF_HELPER_6(vwsll_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
113
+DEF_HELPER_6(vwsll_vx_b, void, ptr, ptr, tl, ptr, env, i32)
114
+DEF_HELPER_6(vwsll_vx_h, void, ptr, ptr, tl, ptr, env, i32)
115
+DEF_HELPER_6(vwsll_vx_w, void, ptr, ptr, tl, ptr, env, i32)
116
+
117
+DEF_HELPER_6(vandn_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
118
+DEF_HELPER_6(vandn_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
119
+DEF_HELPER_6(vandn_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
120
+DEF_HELPER_6(vandn_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
121
+DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
122
+DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
123
+DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
124
+DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
125
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
126
index XXXXXXX..XXXXXXX 100644
127
--- a/target/riscv/insn32.decode
128
+++ b/target/riscv/insn32.decode
129
@@ -XXX,XX +XXX,XX @@
130
%imm_u 12:s20 !function=ex_shift_12
131
%imm_bs 30:2 !function=ex_shift_3
132
%imm_rnum 20:4
133
+%imm_z6 26:1 15:5
134
135
# Argument sets:
136
&empty
137
@@ -XXX,XX +XXX,XX @@
138
@r_vm ...... vm:1 ..... ..... ... ..... ....... &rmrr %rs2 %rs1 %rd
139
@r_vm_1 ...... . ..... ..... ... ..... ....... &rmrr vm=1 %rs2 %rs1 %rd
140
@r_vm_0 ...... . ..... ..... ... ..... ....... &rmrr vm=0 %rs2 %rs1 %rd
141
+@r2_zimm6 ..... . vm:1 ..... ..... ... ..... ....... &rmrr %rs2 rs1=%imm_z6 %rd
142
@r2_zimm11 . zimm:11 ..... ... ..... ....... %rs1 %rd
143
@r2_zimm10 .. zimm:10 ..... ... ..... ....... %rs1 %rd
144
@r2_s ....... ..... ..... ... ..... ....... %rs2 %rs1
145
@@ -XXX,XX +XXX,XX @@ vclmul_vv 001100 . ..... ..... 010 ..... 1010111 @r_vm
146
vclmul_vx 001100 . ..... ..... 110 ..... 1010111 @r_vm
147
vclmulh_vv 001101 . ..... ..... 010 ..... 1010111 @r_vm
148
vclmulh_vx 001101 . ..... ..... 110 ..... 1010111 @r_vm
149
+
150
+# *** Zvbb vector crypto extension ***
151
+vrol_vv 010101 . ..... ..... 000 ..... 1010111 @r_vm
152
+vrol_vx 010101 . ..... ..... 100 ..... 1010111 @r_vm
153
+vror_vv 010100 . ..... ..... 000 ..... 1010111 @r_vm
154
+vror_vx 010100 . ..... ..... 100 ..... 1010111 @r_vm
155
+vror_vi 01010. . ..... ..... 011 ..... 1010111 @r2_zimm6
156
+vbrev8_v 010010 . ..... 01000 010 ..... 1010111 @r2_vm
157
+vrev8_v 010010 . ..... 01001 010 ..... 1010111 @r2_vm
158
+vandn_vv 000001 . ..... ..... 000 ..... 1010111 @r_vm
159
+vandn_vx 000001 . ..... ..... 100 ..... 1010111 @r_vm
160
+vbrev_v 010010 . ..... 01010 010 ..... 1010111 @r2_vm
161
+vclz_v 010010 . ..... 01100 010 ..... 1010111 @r2_vm
162
+vctz_v 010010 . ..... 01101 010 ..... 1010111 @r2_vm
163
+vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
164
+vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
165
+vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
166
+vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
167
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
168
index XXXXXXX..XXXXXXX 100644
169
--- a/target/riscv/cpu.c
170
+++ b/target/riscv/cpu.c
171
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
172
ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
173
ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
174
ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
175
+ ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
176
ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
177
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
178
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
179
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
180
return;
181
}
182
183
+ /*
184
+ * In principle Zve*x would also suffice here, were they supported
185
+ * in qemu
186
+ */
187
+ if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
188
+ error_setg(errp,
189
+ "Vector crypto extensions require V or Zve* extensions");
190
+ return;
191
+ }
192
+
193
if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
194
error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
195
return;
196
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
197
DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
198
199
/* Vector cryptography extensions */
200
+ DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
201
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
202
203
DEFINE_PROP_END_OF_LIST(),
204
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
205
index XXXXXXX..XXXXXXX 100644
206
--- a/target/riscv/vcrypto_helper.c
207
+++ b/target/riscv/vcrypto_helper.c
208
@@ -XXX,XX +XXX,XX @@
209
#include "qemu/osdep.h"
210
#include "qemu/host-utils.h"
211
#include "qemu/bitops.h"
212
+#include "qemu/bswap.h"
213
#include "cpu.h"
214
#include "exec/memop.h"
215
#include "exec/exec-all.h"
216
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVV2, vclmulh_vv, OP_UUU_D, H8, H8, H8, clmulh64)
217
GEN_VEXT_VV(vclmulh_vv, 8)
218
RVVCALL(OPIVX2, vclmulh_vx, OP_UUU_D, H8, H8, clmulh64)
219
GEN_VEXT_VX(vclmulh_vx, 8)
220
+
221
+RVVCALL(OPIVV2, vror_vv_b, OP_UUU_B, H1, H1, H1, ror8)
222
+RVVCALL(OPIVV2, vror_vv_h, OP_UUU_H, H2, H2, H2, ror16)
223
+RVVCALL(OPIVV2, vror_vv_w, OP_UUU_W, H4, H4, H4, ror32)
224
+RVVCALL(OPIVV2, vror_vv_d, OP_UUU_D, H8, H8, H8, ror64)
225
+GEN_VEXT_VV(vror_vv_b, 1)
226
+GEN_VEXT_VV(vror_vv_h, 2)
227
+GEN_VEXT_VV(vror_vv_w, 4)
228
+GEN_VEXT_VV(vror_vv_d, 8)
229
+
230
+RVVCALL(OPIVX2, vror_vx_b, OP_UUU_B, H1, H1, ror8)
231
+RVVCALL(OPIVX2, vror_vx_h, OP_UUU_H, H2, H2, ror16)
232
+RVVCALL(OPIVX2, vror_vx_w, OP_UUU_W, H4, H4, ror32)
233
+RVVCALL(OPIVX2, vror_vx_d, OP_UUU_D, H8, H8, ror64)
234
+GEN_VEXT_VX(vror_vx_b, 1)
235
+GEN_VEXT_VX(vror_vx_h, 2)
236
+GEN_VEXT_VX(vror_vx_w, 4)
237
+GEN_VEXT_VX(vror_vx_d, 8)
238
+
239
+RVVCALL(OPIVV2, vrol_vv_b, OP_UUU_B, H1, H1, H1, rol8)
240
+RVVCALL(OPIVV2, vrol_vv_h, OP_UUU_H, H2, H2, H2, rol16)
241
+RVVCALL(OPIVV2, vrol_vv_w, OP_UUU_W, H4, H4, H4, rol32)
242
+RVVCALL(OPIVV2, vrol_vv_d, OP_UUU_D, H8, H8, H8, rol64)
243
+GEN_VEXT_VV(vrol_vv_b, 1)
244
+GEN_VEXT_VV(vrol_vv_h, 2)
245
+GEN_VEXT_VV(vrol_vv_w, 4)
246
+GEN_VEXT_VV(vrol_vv_d, 8)
247
+
248
+RVVCALL(OPIVX2, vrol_vx_b, OP_UUU_B, H1, H1, rol8)
249
+RVVCALL(OPIVX2, vrol_vx_h, OP_UUU_H, H2, H2, rol16)
250
+RVVCALL(OPIVX2, vrol_vx_w, OP_UUU_W, H4, H4, rol32)
251
+RVVCALL(OPIVX2, vrol_vx_d, OP_UUU_D, H8, H8, rol64)
252
+GEN_VEXT_VX(vrol_vx_b, 1)
253
+GEN_VEXT_VX(vrol_vx_h, 2)
254
+GEN_VEXT_VX(vrol_vx_w, 4)
255
+GEN_VEXT_VX(vrol_vx_d, 8)
256
+
257
+static uint64_t brev8(uint64_t val)
258
+{
259
+ val = ((val & 0x5555555555555555ull) << 1) |
260
+ ((val & 0xAAAAAAAAAAAAAAAAull) >> 1);
261
+ val = ((val & 0x3333333333333333ull) << 2) |
262
+ ((val & 0xCCCCCCCCCCCCCCCCull) >> 2);
263
+ val = ((val & 0x0F0F0F0F0F0F0F0Full) << 4) |
264
+ ((val & 0xF0F0F0F0F0F0F0F0ull) >> 4);
265
+
266
+ return val;
267
+}
268
+
269
+RVVCALL(OPIVV1, vbrev8_v_b, OP_UU_B, H1, H1, brev8)
270
+RVVCALL(OPIVV1, vbrev8_v_h, OP_UU_H, H2, H2, brev8)
271
+RVVCALL(OPIVV1, vbrev8_v_w, OP_UU_W, H4, H4, brev8)
272
+RVVCALL(OPIVV1, vbrev8_v_d, OP_UU_D, H8, H8, brev8)
273
+GEN_VEXT_V(vbrev8_v_b, 1)
274
+GEN_VEXT_V(vbrev8_v_h, 2)
275
+GEN_VEXT_V(vbrev8_v_w, 4)
276
+GEN_VEXT_V(vbrev8_v_d, 8)
277
+
278
+#define DO_IDENTITY(a) (a)
279
+RVVCALL(OPIVV1, vrev8_v_b, OP_UU_B, H1, H1, DO_IDENTITY)
280
+RVVCALL(OPIVV1, vrev8_v_h, OP_UU_H, H2, H2, bswap16)
281
+RVVCALL(OPIVV1, vrev8_v_w, OP_UU_W, H4, H4, bswap32)
282
+RVVCALL(OPIVV1, vrev8_v_d, OP_UU_D, H8, H8, bswap64)
283
+GEN_VEXT_V(vrev8_v_b, 1)
284
+GEN_VEXT_V(vrev8_v_h, 2)
285
+GEN_VEXT_V(vrev8_v_w, 4)
286
+GEN_VEXT_V(vrev8_v_d, 8)
287
+
288
+#define DO_ANDN(a, b) ((a) & ~(b))
289
+RVVCALL(OPIVV2, vandn_vv_b, OP_UUU_B, H1, H1, H1, DO_ANDN)
290
+RVVCALL(OPIVV2, vandn_vv_h, OP_UUU_H, H2, H2, H2, DO_ANDN)
291
+RVVCALL(OPIVV2, vandn_vv_w, OP_UUU_W, H4, H4, H4, DO_ANDN)
292
+RVVCALL(OPIVV2, vandn_vv_d, OP_UUU_D, H8, H8, H8, DO_ANDN)
293
+GEN_VEXT_VV(vandn_vv_b, 1)
294
+GEN_VEXT_VV(vandn_vv_h, 2)
295
+GEN_VEXT_VV(vandn_vv_w, 4)
296
+GEN_VEXT_VV(vandn_vv_d, 8)
297
+
298
+RVVCALL(OPIVX2, vandn_vx_b, OP_UUU_B, H1, H1, DO_ANDN)
299
+RVVCALL(OPIVX2, vandn_vx_h, OP_UUU_H, H2, H2, DO_ANDN)
300
+RVVCALL(OPIVX2, vandn_vx_w, OP_UUU_W, H4, H4, DO_ANDN)
301
+RVVCALL(OPIVX2, vandn_vx_d, OP_UUU_D, H8, H8, DO_ANDN)
302
+GEN_VEXT_VX(vandn_vx_b, 1)
303
+GEN_VEXT_VX(vandn_vx_h, 2)
304
+GEN_VEXT_VX(vandn_vx_w, 4)
305
+GEN_VEXT_VX(vandn_vx_d, 8)
306
+
307
+RVVCALL(OPIVV1, vbrev_v_b, OP_UU_B, H1, H1, revbit8)
308
+RVVCALL(OPIVV1, vbrev_v_h, OP_UU_H, H2, H2, revbit16)
309
+RVVCALL(OPIVV1, vbrev_v_w, OP_UU_W, H4, H4, revbit32)
310
+RVVCALL(OPIVV1, vbrev_v_d, OP_UU_D, H8, H8, revbit64)
311
+GEN_VEXT_V(vbrev_v_b, 1)
312
+GEN_VEXT_V(vbrev_v_h, 2)
313
+GEN_VEXT_V(vbrev_v_w, 4)
314
+GEN_VEXT_V(vbrev_v_d, 8)
315
+
316
+RVVCALL(OPIVV1, vclz_v_b, OP_UU_B, H1, H1, clz8)
317
+RVVCALL(OPIVV1, vclz_v_h, OP_UU_H, H2, H2, clz16)
318
+RVVCALL(OPIVV1, vclz_v_w, OP_UU_W, H4, H4, clz32)
319
+RVVCALL(OPIVV1, vclz_v_d, OP_UU_D, H8, H8, clz64)
320
+GEN_VEXT_V(vclz_v_b, 1)
321
+GEN_VEXT_V(vclz_v_h, 2)
322
+GEN_VEXT_V(vclz_v_w, 4)
323
+GEN_VEXT_V(vclz_v_d, 8)
324
+
325
+RVVCALL(OPIVV1, vctz_v_b, OP_UU_B, H1, H1, ctz8)
326
+RVVCALL(OPIVV1, vctz_v_h, OP_UU_H, H2, H2, ctz16)
327
+RVVCALL(OPIVV1, vctz_v_w, OP_UU_W, H4, H4, ctz32)
328
+RVVCALL(OPIVV1, vctz_v_d, OP_UU_D, H8, H8, ctz64)
329
+GEN_VEXT_V(vctz_v_b, 1)
330
+GEN_VEXT_V(vctz_v_h, 2)
331
+GEN_VEXT_V(vctz_v_w, 4)
332
+GEN_VEXT_V(vctz_v_d, 8)
333
+
334
+RVVCALL(OPIVV1, vcpop_v_b, OP_UU_B, H1, H1, ctpop8)
335
+RVVCALL(OPIVV1, vcpop_v_h, OP_UU_H, H2, H2, ctpop16)
336
+RVVCALL(OPIVV1, vcpop_v_w, OP_UU_W, H4, H4, ctpop32)
337
+RVVCALL(OPIVV1, vcpop_v_d, OP_UU_D, H8, H8, ctpop64)
338
+GEN_VEXT_V(vcpop_v_b, 1)
339
+GEN_VEXT_V(vcpop_v_h, 2)
340
+GEN_VEXT_V(vcpop_v_w, 4)
341
+GEN_VEXT_V(vcpop_v_d, 8)
342
+
343
+#define DO_SLL(N, M) (N << (M & (sizeof(N) * 8 - 1)))
344
+RVVCALL(OPIVV2, vwsll_vv_b, WOP_UUU_B, H2, H1, H1, DO_SLL)
345
+RVVCALL(OPIVV2, vwsll_vv_h, WOP_UUU_H, H4, H2, H2, DO_SLL)
346
+RVVCALL(OPIVV2, vwsll_vv_w, WOP_UUU_W, H8, H4, H4, DO_SLL)
347
+GEN_VEXT_VV(vwsll_vv_b, 2)
348
+GEN_VEXT_VV(vwsll_vv_h, 4)
349
+GEN_VEXT_VV(vwsll_vv_w, 8)
350
+
351
+RVVCALL(OPIVX2, vwsll_vx_b, WOP_UUU_B, H2, H1, DO_SLL)
352
+RVVCALL(OPIVX2, vwsll_vx_h, WOP_UUU_H, H4, H2, DO_SLL)
353
+RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
354
+GEN_VEXT_VX(vwsll_vx_b, 2)
355
+GEN_VEXT_VX(vwsll_vx_h, 4)
356
+GEN_VEXT_VX(vwsll_vx_w, 8)
357
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
358
index XXXXXXX..XXXXXXX 100644
359
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
360
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
361
@@ -XXX,XX +XXX,XX @@ static bool vclmul_vx_check(DisasContext *s, arg_rmrr *a)
362
363
GEN_VX_MASKED_TRANS(vclmul_vx, vclmul_vx_check)
364
GEN_VX_MASKED_TRANS(vclmulh_vx, vclmul_vx_check)
365
+
53
+/*
366
+/*
54
+ * M-mode:
367
+ * Zvbb
55
+ * Without ext_smctr raise illegal inst excep.
56
+ * Otherwise everything is accessible to m-mode.
57
+ *
58
+ * S-mode:
59
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
60
+ * Otherwise everything other than mctrctl is accessible.
61
+ *
62
+ * VS-mode:
63
+ * Without ext_ssctr or mstateen.ctr raise illegal inst excep.
64
+ * Without hstateen.ctr raise virtual illegal inst excep.
65
+ * Otherwise allow sctrctl (vsctrctl), sctrstatus, 0x200-0x2ff entry range.
66
+ * Always raise illegal instruction exception for sctrdepth.
67
+ */
368
+ */
68
+static RISCVException ctr_mmode(CPURISCVState *env, int csrno)
369
+
370
+#define GEN_OPIVI_GVEC_TRANS_CHECK(NAME, IMM_MODE, OPIVX, SUF, CHECK) \
371
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
372
+ { \
373
+ if (CHECK(s, a)) { \
374
+ static gen_helper_opivx *const fns[4] = { \
375
+ gen_helper_##OPIVX##_b, \
376
+ gen_helper_##OPIVX##_h, \
377
+ gen_helper_##OPIVX##_w, \
378
+ gen_helper_##OPIVX##_d, \
379
+ }; \
380
+ return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew], \
381
+ IMM_MODE); \
382
+ } \
383
+ return false; \
384
+ }
385
+
386
+#define GEN_OPIVV_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
387
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
388
+ { \
389
+ if (CHECK(s, a)) { \
390
+ static gen_helper_gvec_4_ptr *const fns[4] = { \
391
+ gen_helper_##NAME##_b, \
392
+ gen_helper_##NAME##_h, \
393
+ gen_helper_##NAME##_w, \
394
+ gen_helper_##NAME##_d, \
395
+ }; \
396
+ return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
397
+ } \
398
+ return false; \
399
+ }
400
+
401
+#define GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(NAME, SUF, CHECK) \
402
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
403
+ { \
404
+ if (CHECK(s, a)) { \
405
+ static gen_helper_opivx *const fns[4] = { \
406
+ gen_helper_##NAME##_b, \
407
+ gen_helper_##NAME##_h, \
408
+ gen_helper_##NAME##_w, \
409
+ gen_helper_##NAME##_d, \
410
+ }; \
411
+ return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, \
412
+ fns[s->sew]); \
413
+ } \
414
+ return false; \
415
+ }
416
+
417
+static bool zvbb_vv_check(DisasContext *s, arg_rmrr *a)
69
+{
418
+{
70
+ /* Check if smctr-ext is present */
419
+ return opivv_check(s, a) && s->cfg_ptr->ext_zvbb == true;
71
+ if (riscv_cpu_cfg(env)->ext_smctr) {
72
+ return RISCV_EXCP_NONE;
73
+ }
74
+
75
+ return RISCV_EXCP_ILLEGAL_INST;
76
+}
420
+}
77
+
421
+
78
+static RISCVException ctr_smode(CPURISCVState *env, int csrno)
422
+static bool zvbb_vx_check(DisasContext *s, arg_rmrr *a)
79
+{
423
+{
80
+ const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
424
+ return opivx_check(s, a) && s->cfg_ptr->ext_zvbb == true;
81
+
82
+ if (!cfg->ext_smctr && !cfg->ext_ssctr) {
83
+ return RISCV_EXCP_ILLEGAL_INST;
84
+ }
85
+
86
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
87
+ if (ret == RISCV_EXCP_NONE && csrno == CSR_SCTRDEPTH &&
88
+ env->virt_enabled) {
89
+ return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
90
+ }
91
+
92
+ return ret;
93
+}
425
+}
94
+
426
+
95
static RISCVException aia_hmode(CPURISCVState *env, int csrno)
427
+/* vrol.v[vx] */
96
{
428
+GEN_OPIVV_GVEC_TRANS_CHECK(vrol_vv, rotlv, zvbb_vv_check)
97
int ret;
429
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vrol_vx, rotls, zvbb_vx_check)
98
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
430
+
99
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
431
+/* vror.v[vxi] */
100
}
432
+GEN_OPIVV_GVEC_TRANS_CHECK(vror_vv, rotrv, zvbb_vv_check)
101
433
+GEN_OPIVX_GVEC_SHIFT_TRANS_CHECK(vror_vx, rotrs, zvbb_vx_check)
102
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
434
+GEN_OPIVI_GVEC_TRANS_CHECK(vror_vi, IMM_TRUNC_SEW, vror_vx, rotri, zvbb_vx_check)
103
+ wr_mask |= SMSTATEEN0_CTR;
435
+
104
+ }
436
+#define GEN_OPIVX_GVEC_TRANS_CHECK(NAME, SUF, CHECK) \
105
+
437
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
106
return write_mstateen(env, csrno, wr_mask, new_val);
438
+ { \
107
}
439
+ if (CHECK(s, a)) { \
108
440
+ static gen_helper_opivx *const fns[4] = { \
109
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
441
+ gen_helper_##NAME##_b, \
110
wr_mask |= SMSTATEEN0_P1P13;
442
+ gen_helper_##NAME##_h, \
111
}
443
+ gen_helper_##NAME##_w, \
112
444
+ gen_helper_##NAME##_d, \
113
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
445
+ }; \
114
+ wr_mask |= SMSTATEEN0_CTR;
446
+ return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
115
+ }
447
+ } \
116
+
448
+ return false; \
117
return write_mstateenh(env, csrno, wr_mask, new_val);
449
+ }
118
}
450
+
119
451
+/* vandn.v[vx] */
120
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
452
+GEN_OPIVV_GVEC_TRANS_CHECK(vandn_vv, andc, zvbb_vv_check)
121
wr_mask |= (SMSTATEEN0_AIA | SMSTATEEN0_IMSIC);
453
+GEN_OPIVX_GVEC_TRANS_CHECK(vandn_vx, andcs, zvbb_vx_check)
122
}
454
+
123
455
+#define GEN_OPIV_TRANS(NAME, CHECK) \
124
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
456
+ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
125
+ wr_mask |= SMSTATEEN0_CTR;
457
+ { \
126
+ }
458
+ if (CHECK(s, a)) { \
127
+
459
+ uint32_t data = 0; \
128
return write_hstateen(env, csrno, wr_mask, new_val);
460
+ static gen_helper_gvec_3_ptr *const fns[4] = { \
129
}
461
+ gen_helper_##NAME##_b, \
130
462
+ gen_helper_##NAME##_h, \
131
@@ -XXX,XX +XXX,XX @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
463
+ gen_helper_##NAME##_w, \
132
{
464
+ gen_helper_##NAME##_d, \
133
uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
465
+ }; \
134
466
+ TCGLabel *over = gen_new_label(); \
135
+ if (riscv_cpu_cfg(env)->ext_ssctr) {
467
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
136
+ wr_mask |= SMSTATEEN0_CTR;
468
+ \
137
+ }
469
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
138
+
470
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
139
return write_hstateenh(env, csrno, wr_mask, new_val);
471
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
140
}
472
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
141
473
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
142
@@ -XXX,XX +XXX,XX @@ static RISCVException write_satp(CPURISCVState *env, int csrno,
474
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
143
return RISCV_EXCP_NONE;
475
+ vreg_ofs(s, a->rs2), cpu_env, \
144
}
476
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
145
477
+ data, fns[s->sew]); \
146
+static RISCVException rmw_sctrdepth(CPURISCVState *env, int csrno,
478
+ mark_vs_dirty(s); \
147
+ target_ulong *ret_val,
479
+ gen_set_label(over); \
148
+ target_ulong new_val, target_ulong wr_mask)
480
+ return true; \
481
+ } \
482
+ return false; \
483
+ }
484
+
485
+static bool zvbb_opiv_check(DisasContext *s, arg_rmr *a)
149
+{
486
+{
150
+ uint64_t mask = wr_mask & SCTRDEPTH_MASK;
487
+ return s->cfg_ptr->ext_zvbb == true &&
151
+
488
+ require_rvv(s) &&
152
+ if (ret_val) {
489
+ vext_check_isa_ill(s) &&
153
+ *ret_val = env->sctrdepth;
490
+ vext_check_ss(s, a->rd, a->rs2, a->vm);
154
+ }
155
+
156
+ env->sctrdepth = (env->sctrdepth & ~mask) | (new_val & mask);
157
+
158
+ /* Correct depth. */
159
+ if (mask) {
160
+ uint64_t depth = get_field(env->sctrdepth, SCTRDEPTH_MASK);
161
+
162
+ if (depth > SCTRDEPTH_MAX) {
163
+ depth = SCTRDEPTH_MAX;
164
+ env->sctrdepth = set_field(env->sctrdepth, SCTRDEPTH_MASK, depth);
165
+ }
166
+
167
+ /* Update sctrstatus.WRPTR with a legal value */
168
+ depth = 16 << depth;
169
+ env->sctrstatus =
170
+ env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
171
+ }
172
+
173
+ return RISCV_EXCP_NONE;
174
+}
491
+}
175
+
492
+
176
+static RISCVException rmw_sctrstatus(CPURISCVState *env, int csrno,
493
+GEN_OPIV_TRANS(vbrev8_v, zvbb_opiv_check)
177
+ target_ulong *ret_val,
494
+GEN_OPIV_TRANS(vrev8_v, zvbb_opiv_check)
178
+ target_ulong new_val, target_ulong wr_mask)
495
+GEN_OPIV_TRANS(vbrev_v, zvbb_opiv_check)
496
+GEN_OPIV_TRANS(vclz_v, zvbb_opiv_check)
497
+GEN_OPIV_TRANS(vctz_v, zvbb_opiv_check)
498
+GEN_OPIV_TRANS(vcpop_v, zvbb_opiv_check)
499
+
500
+static bool vwsll_vv_check(DisasContext *s, arg_rmrr *a)
179
+{
501
+{
180
+ uint32_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
502
+ return s->cfg_ptr->ext_zvbb && opivv_widen_check(s, a);
181
+ uint32_t mask = wr_mask & SCTRSTATUS_MASK;
182
+
183
+ if (ret_val) {
184
+ *ret_val = env->sctrstatus;
185
+ }
186
+
187
+ env->sctrstatus = (env->sctrstatus & ~mask) | (new_val & mask);
188
+
189
+ /* Update sctrstatus.WRPTR with a legal value */
190
+ env->sctrstatus = env->sctrstatus & (~SCTRSTATUS_WRPTR_MASK | (depth - 1));
191
+
192
+ return RISCV_EXCP_NONE;
193
+}
503
+}
194
+
504
+
195
+static RISCVException rmw_xctrctl(CPURISCVState *env, int csrno,
505
+static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
196
+ target_ulong *ret_val,
197
+ target_ulong new_val, target_ulong wr_mask)
198
+{
506
+{
199
+ uint64_t csr_mask, mask = wr_mask;
507
+ return s->cfg_ptr->ext_zvbb && opivx_widen_check(s, a);
200
+ uint64_t *ctl_ptr = &env->mctrctl;
201
+
202
+ if (csrno == CSR_MCTRCTL) {
203
+ csr_mask = MCTRCTL_MASK;
204
+ } else if (csrno == CSR_SCTRCTL && !env->virt_enabled) {
205
+ csr_mask = SCTRCTL_MASK;
206
+ } else {
207
+ /*
208
+ * This is for csrno == CSR_SCTRCTL and env->virt_enabled == true
209
+ * or csrno == CSR_VSCTRCTL.
210
+ */
211
+ csr_mask = VSCTRCTL_MASK;
212
+ ctl_ptr = &env->vsctrctl;
213
+ }
214
+
215
+ mask &= csr_mask;
216
+
217
+ if (ret_val) {
218
+ *ret_val = *ctl_ptr & csr_mask;
219
+ }
220
+
221
+ *ctl_ptr = (*ctl_ptr & ~mask) | (new_val & mask);
222
+
223
+ return RISCV_EXCP_NONE;
224
+}
508
+}
225
+
509
+
226
static RISCVException read_vstopi(CPURISCVState *env, int csrno,
510
+/* OPIVI without GVEC IR */
227
target_ulong *val)
511
+#define GEN_OPIVI_WIDEN_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
228
{
512
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
229
@@ -XXX,XX +XXX,XX @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
513
+ { \
230
[CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
514
+ if (CHECK(s, a)) { \
231
[CSR_MCONTEXT] = { "mcontext", debug, read_mcontext, write_mcontext },
515
+ static gen_helper_opivx *const fns[3] = { \
232
516
+ gen_helper_##OPIVX##_b, \
233
+ [CSR_MCTRCTL] = { "mctrctl", ctr_mmode, NULL, NULL, rmw_xctrctl },
517
+ gen_helper_##OPIVX##_h, \
234
+ [CSR_SCTRCTL] = { "sctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
518
+ gen_helper_##OPIVX##_w, \
235
+ [CSR_VSCTRCTL] = { "vsctrctl", ctr_smode, NULL, NULL, rmw_xctrctl },
519
+ }; \
236
+ [CSR_SCTRDEPTH] = { "sctrdepth", ctr_smode, NULL, NULL, rmw_sctrdepth },
520
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, \
237
+ [CSR_SCTRSTATUS] = { "sctrstatus", ctr_smode, NULL, NULL, rmw_sctrstatus },
521
+ IMM_MODE); \
238
+
522
+ } \
239
/* Performance Counters */
523
+ return false; \
240
[CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
524
+ }
241
[CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
525
+
526
+GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
527
+GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
528
+GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
242
--
529
--
243
2.48.1
530
2.41.0
diff view generated by jsdifflib
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
CTR extension adds a new instruction sctrclr to quickly
3
This commit adds support for the Zvkned vector-crypto extension, which
4
clear the recorded entries buffer.
4
consists of the following instructions:
5
5
6
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
6
* vaesef.[vv,vs]
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
* vaesdf.[vv,vs]
8
Message-ID: <20250205-b4-ctr_upstream_v6-v6-5-439d8e06c8ef@rivosinc.com>
8
* vaesdm.[vv,vs]
9
* vaesz.vs
10
* vaesem.[vv,vs]
11
* vaeskf1.vi
12
* vaeskf2.vi
13
14
Translation functions are defined in
15
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
16
`target/riscv/vcrypto_helper.c`.
17
18
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
19
Co-authored-by: William Salmon <will.salmon@codethink.co.uk>
20
[max.chou@sifive.com: Replaced vstart checking by TCG op]
21
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
22
Signed-off-by: William Salmon <will.salmon@codethink.co.uk>
23
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
24
Signed-off-by: Max Chou <max.chou@sifive.com>
25
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
26
[max.chou@sifive.com: Imported aes-round.h and exposed x-zvkned
27
property]
28
[max.chou@sifive.com: Fixed endian issues and replaced the vstart & vl
29
egs checking by helper function]
30
[max.chou@sifive.com: Replaced bswap32 calls in aes key expanding]
31
Message-ID: <20230711165917.2629866-10-max.chou@sifive.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
33
---
11
target/riscv/cpu.h | 1 +
34
target/riscv/cpu_cfg.h | 1 +
12
target/riscv/helper.h | 1 +
35
target/riscv/helper.h | 14 ++
13
target/riscv/insn32.decode | 1 +
36
target/riscv/insn32.decode | 14 ++
14
target/riscv/cpu_helper.c | 7 +++++
37
target/riscv/cpu.c | 4 +-
15
target/riscv/op_helper.c | 29 +++++++++++++++++++
38
target/riscv/vcrypto_helper.c | 202 +++++++++++++++++++++++
16
.../riscv/insn_trans/trans_privileged.c.inc | 11 +++++++
39
target/riscv/insn_trans/trans_rvvk.c.inc | 147 +++++++++++++++++
17
6 files changed, 50 insertions(+)
40
6 files changed, 381 insertions(+), 1 deletion(-)
18
41
19
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
42
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
20
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu.h
44
--- a/target/riscv/cpu_cfg.h
22
+++ b/target/riscv/cpu.h
45
+++ b/target/riscv/cpu_cfg.h
23
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
46
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
24
47
bool ext_zve64d;
25
void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
48
bool ext_zvbb;
26
enum CTRType type, target_ulong prev_priv, bool prev_virt);
49
bool ext_zvbc;
27
+void riscv_ctr_clear(CPURISCVState *env);
50
+ bool ext_zvkned;
28
51
bool ext_zmmul;
29
void riscv_translate_init(void);
52
bool ext_zvfbfmin;
30
void riscv_translate_code(CPUState *cs, TranslationBlock *tb,
53
bool ext_zvfbfwma;
31
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
54
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
32
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/helper.h
56
--- a/target/riscv/helper.h
34
+++ b/target/riscv/helper.h
57
+++ b/target/riscv/helper.h
35
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
58
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_6(vandn_vx_b, void, ptr, ptr, tl, ptr, env, i32)
36
DEF_HELPER_1(sret, tl, env)
59
DEF_HELPER_6(vandn_vx_h, void, ptr, ptr, tl, ptr, env, i32)
37
DEF_HELPER_1(mret, tl, env)
60
DEF_HELPER_6(vandn_vx_w, void, ptr, ptr, tl, ptr, env, i32)
38
DEF_HELPER_1(mnret, tl, env)
61
DEF_HELPER_6(vandn_vx_d, void, ptr, ptr, tl, ptr, env, i32)
39
+DEF_HELPER_1(ctr_clear, void, env)
62
+
40
DEF_HELPER_1(wfi, void, env)
63
+DEF_HELPER_2(egs_check, void, i32, env)
41
DEF_HELPER_1(wrs_nto, void, env)
64
+
42
DEF_HELPER_1(tlb_flush, void, env)
65
+DEF_HELPER_4(vaesef_vv, void, ptr, ptr, env, i32)
66
+DEF_HELPER_4(vaesef_vs, void, ptr, ptr, env, i32)
67
+DEF_HELPER_4(vaesdf_vv, void, ptr, ptr, env, i32)
68
+DEF_HELPER_4(vaesdf_vs, void, ptr, ptr, env, i32)
69
+DEF_HELPER_4(vaesem_vv, void, ptr, ptr, env, i32)
70
+DEF_HELPER_4(vaesem_vs, void, ptr, ptr, env, i32)
71
+DEF_HELPER_4(vaesdm_vv, void, ptr, ptr, env, i32)
72
+DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
73
+DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
74
+DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
75
+DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
43
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
76
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
44
index XXXXXXX..XXXXXXX 100644
77
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/insn32.decode
78
--- a/target/riscv/insn32.decode
46
+++ b/target/riscv/insn32.decode
79
+++ b/target/riscv/insn32.decode
47
@@ -XXX,XX +XXX,XX @@
80
@@ -XXX,XX +XXX,XX @@
48
# *** Privileged Instructions ***
81
@r_rm ....... ..... ..... ... ..... ....... %rs2 %rs1 %rm %rd
49
ecall 000000000000 00000 000 00000 1110011
82
@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
50
ebreak 000000000001 00000 000 00000 1110011
83
@r2 ....... ..... ..... ... ..... ....... &r2 %rs1 %rd
51
+sctrclr 000100000100 00000 000 00000 1110011
84
+@r2_vm_1 ...... . ..... ..... ... ..... ....... &rmr vm=1 %rs2 %rd
52
uret 0000000 00010 00000 000 00000 1110011
85
@r2_nfvm ... ... vm:1 ..... ..... ... ..... ....... &r2nfvm %nf %rs1 %rd
53
sret 0001000 00010 00000 000 00000 1110011
86
@r2_vm ...... vm:1 ..... ..... ... ..... ....... &rmr %rs2 %rd
54
mret 0011000 00010 00000 000 00000 1110011
87
@r1_vm ...... vm:1 ..... ..... ... ..... ....... %rd
55
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
88
@@ -XXX,XX +XXX,XX @@ vcpop_v 010010 . ..... 01110 010 ..... 1010111 @r2_vm
89
vwsll_vv 110101 . ..... ..... 000 ..... 1010111 @r_vm
90
vwsll_vx 110101 . ..... ..... 100 ..... 1010111 @r_vm
91
vwsll_vi 110101 . ..... ..... 011 ..... 1010111 @r_vm
92
+
93
+# *** Zvkned vector crypto extension ***
94
+vaesef_vv 101000 1 ..... 00011 010 ..... 1110111 @r2_vm_1
95
+vaesef_vs 101001 1 ..... 00011 010 ..... 1110111 @r2_vm_1
96
+vaesdf_vv 101000 1 ..... 00001 010 ..... 1110111 @r2_vm_1
97
+vaesdf_vs 101001 1 ..... 00001 010 ..... 1110111 @r2_vm_1
98
+vaesem_vv 101000 1 ..... 00010 010 ..... 1110111 @r2_vm_1
99
+vaesem_vs 101001 1 ..... 00010 010 ..... 1110111 @r2_vm_1
100
+vaesdm_vv 101000 1 ..... 00000 010 ..... 1110111 @r2_vm_1
101
+vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
102
+vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
103
+vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
104
+vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
105
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
56
index XXXXXXX..XXXXXXX 100644
106
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/cpu_helper.c
107
--- a/target/riscv/cpu.c
58
+++ b/target/riscv/cpu_helper.c
108
+++ b/target/riscv/cpu.c
59
@@ -XXX,XX +XXX,XX @@ static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
109
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
60
}
110
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
61
}
111
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
62
112
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
63
+void riscv_ctr_clear(CPURISCVState *env)
113
+ ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
64
+{
114
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
65
+ memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
115
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
66
+ memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
116
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
67
+ memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
117
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
68
+}
118
* In principle Zve*x would also suffice here, were they supported
69
+
119
* in qemu
70
static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
120
*/
71
{
121
- if (cpu->cfg.ext_zvbb && !cpu->cfg.ext_zve32f) {
72
switch (priv) {
122
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
73
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
123
error_setg(errp,
124
"Vector crypto extensions require V or Zve* extensions");
125
return;
126
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
127
/* Vector cryptography extensions */
128
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
129
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
130
+ DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
131
132
DEFINE_PROP_END_OF_LIST(),
133
};
134
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
74
index XXXXXXX..XXXXXXX 100644
135
index XXXXXXX..XXXXXXX 100644
75
--- a/target/riscv/op_helper.c
136
--- a/target/riscv/vcrypto_helper.c
76
+++ b/target/riscv/op_helper.c
137
+++ b/target/riscv/vcrypto_helper.c
77
@@ -XXX,XX +XXX,XX @@ void helper_ctr_add_entry(CPURISCVState *env, target_ulong src,
138
@@ -XXX,XX +XXX,XX @@
78
env->priv, env->virt_enabled);
139
#include "qemu/bitops.h"
79
}
140
#include "qemu/bswap.h"
80
141
#include "cpu.h"
81
+void helper_ctr_clear(CPURISCVState *env)
142
+#include "crypto/aes.h"
82
+{
143
+#include "crypto/aes-round.h"
83
+ /*
144
#include "exec/memop.h"
84
+ * It's safe to call smstateen_acc_ok() for umode access regardless of the
145
#include "exec/exec-all.h"
85
+ * state of bit 54 (CTR bit in case of m/hstateen) of sstateen. If the bit
146
#include "exec/helper-proto.h"
86
+ * is zero, smstateen_acc_ok() will return the correct exception code and
147
@@ -XXX,XX +XXX,XX @@ RVVCALL(OPIVX2, vwsll_vx_w, WOP_UUU_W, H8, H4, DO_SLL)
87
+ * if it's one, smstateen_acc_ok() will return RISCV_EXCP_NONE. In that
148
GEN_VEXT_VX(vwsll_vx_b, 2)
88
+ * scenario the U-mode check below will handle that case.
149
GEN_VEXT_VX(vwsll_vx_h, 4)
89
+ */
150
GEN_VEXT_VX(vwsll_vx_w, 8)
90
+ RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_CTR);
151
+
91
+ if (ret != RISCV_EXCP_NONE) {
152
+void HELPER(egs_check)(uint32_t egs, CPURISCVState *env)
92
+ riscv_raise_exception(env, ret, GETPC());
153
+{
93
+ }
154
+ uint32_t vl = env->vl;
94
+
155
+ uint32_t vstart = env->vstart;
95
+ if (env->priv == PRV_U) {
156
+
96
+ /*
157
+ if (vl % egs != 0 || vstart % egs != 0) {
97
+ * One corner case is when sctrclr is executed from VU-mode and
158
+ riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
98
+ * mstateen.CTR = 0, in which case we are supposed to raise
159
+ }
99
+ * RISCV_EXCP_ILLEGAL_INST. This case is already handled in
160
+}
100
+ * smstateen_acc_ok().
161
+
101
+ */
162
+static inline void xor_round_key(AESState *round_state, AESState *round_key)
102
+ uint32_t excep = env->virt_enabled ? RISCV_EXCP_VIRT_INSTRUCTION_FAULT :
163
+{
103
+ RISCV_EXCP_ILLEGAL_INST;
164
+ round_state->v = round_state->v ^ round_key->v;
104
+ riscv_raise_exception(env, excep, GETPC());
165
+}
105
+ }
166
+
106
+
167
+#define GEN_ZVKNED_HELPER_VV(NAME, ...) \
107
+ riscv_ctr_clear(env);
168
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
108
+}
169
+ uint32_t desc) \
109
+
170
+ { \
110
void helper_wfi(CPURISCVState *env)
171
+ uint32_t vl = env->vl; \
111
{
172
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
112
CPUState *cs = env_cpu(env);
173
+ uint32_t vta = vext_vta(desc); \
113
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
174
+ \
175
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
176
+ AESState round_key; \
177
+ round_key.d[0] = *((uint64_t *)vs2 + H8(i * 2 + 0)); \
178
+ round_key.d[1] = *((uint64_t *)vs2 + H8(i * 2 + 1)); \
179
+ AESState round_state; \
180
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
181
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
182
+ __VA_ARGS__; \
183
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
184
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
185
+ } \
186
+ env->vstart = 0; \
187
+ /* set tail elements to 1s */ \
188
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
189
+ }
190
+
191
+#define GEN_ZVKNED_HELPER_VS(NAME, ...) \
192
+ void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
193
+ uint32_t desc) \
194
+ { \
195
+ uint32_t vl = env->vl; \
196
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4); \
197
+ uint32_t vta = vext_vta(desc); \
198
+ \
199
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) { \
200
+ AESState round_key; \
201
+ round_key.d[0] = *((uint64_t *)vs2 + H8(0)); \
202
+ round_key.d[1] = *((uint64_t *)vs2 + H8(1)); \
203
+ AESState round_state; \
204
+ round_state.d[0] = *((uint64_t *)vd + H8(i * 2 + 0)); \
205
+ round_state.d[1] = *((uint64_t *)vd + H8(i * 2 + 1)); \
206
+ __VA_ARGS__; \
207
+ *((uint64_t *)vd + H8(i * 2 + 0)) = round_state.d[0]; \
208
+ *((uint64_t *)vd + H8(i * 2 + 1)) = round_state.d[1]; \
209
+ } \
210
+ env->vstart = 0; \
211
+ /* set tail elements to 1s */ \
212
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
213
+ }
214
+
215
+GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
216
+ &round_state,
217
+ &round_key,
218
+ false);)
219
+GEN_ZVKNED_HELPER_VS(vaesef_vs, aesenc_SB_SR_AK(&round_state,
220
+ &round_state,
221
+ &round_key,
222
+ false);)
223
+GEN_ZVKNED_HELPER_VV(vaesdf_vv, aesdec_ISB_ISR_AK(&round_state,
224
+ &round_state,
225
+ &round_key,
226
+ false);)
227
+GEN_ZVKNED_HELPER_VS(vaesdf_vs, aesdec_ISB_ISR_AK(&round_state,
228
+ &round_state,
229
+ &round_key,
230
+ false);)
231
+GEN_ZVKNED_HELPER_VV(vaesem_vv, aesenc_SB_SR_MC_AK(&round_state,
232
+ &round_state,
233
+ &round_key,
234
+ false);)
235
+GEN_ZVKNED_HELPER_VS(vaesem_vs, aesenc_SB_SR_MC_AK(&round_state,
236
+ &round_state,
237
+ &round_key,
238
+ false);)
239
+GEN_ZVKNED_HELPER_VV(vaesdm_vv, aesdec_ISB_ISR_AK_IMC(&round_state,
240
+ &round_state,
241
+ &round_key,
242
+ false);)
243
+GEN_ZVKNED_HELPER_VS(vaesdm_vs, aesdec_ISB_ISR_AK_IMC(&round_state,
244
+ &round_state,
245
+ &round_key,
246
+ false);)
247
+GEN_ZVKNED_HELPER_VS(vaesz_vs, xor_round_key(&round_state, &round_key);)
248
+
249
+void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
250
+ CPURISCVState *env, uint32_t desc)
251
+{
252
+ uint32_t *vd = vd_vptr;
253
+ uint32_t *vs2 = vs2_vptr;
254
+ uint32_t vl = env->vl;
255
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
256
+ uint32_t vta = vext_vta(desc);
257
+
258
+ uimm &= 0b1111;
259
+ if (uimm > 10 || uimm == 0) {
260
+ uimm ^= 0b1000;
261
+ }
262
+
263
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
264
+ uint32_t rk[8], tmp;
265
+ static const uint32_t rcon[] = {
266
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
267
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
268
+ };
269
+
270
+ rk[0] = vs2[i * 4 + H4(0)];
271
+ rk[1] = vs2[i * 4 + H4(1)];
272
+ rk[2] = vs2[i * 4 + H4(2)];
273
+ rk[3] = vs2[i * 4 + H4(3)];
274
+ tmp = ror32(rk[3], 8);
275
+
276
+ rk[4] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
277
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
278
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
279
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
280
+ ^ rcon[uimm - 1];
281
+ rk[5] = rk[1] ^ rk[4];
282
+ rk[6] = rk[2] ^ rk[5];
283
+ rk[7] = rk[3] ^ rk[6];
284
+
285
+ vd[i * 4 + H4(0)] = rk[4];
286
+ vd[i * 4 + H4(1)] = rk[5];
287
+ vd[i * 4 + H4(2)] = rk[6];
288
+ vd[i * 4 + H4(3)] = rk[7];
289
+ }
290
+ env->vstart = 0;
291
+ /* set tail elements to 1s */
292
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
293
+}
294
+
295
+void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
296
+ CPURISCVState *env, uint32_t desc)
297
+{
298
+ uint32_t *vd = vd_vptr;
299
+ uint32_t *vs2 = vs2_vptr;
300
+ uint32_t vl = env->vl;
301
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
302
+ uint32_t vta = vext_vta(desc);
303
+
304
+ uimm &= 0b1111;
305
+ if (uimm > 14 || uimm < 2) {
306
+ uimm ^= 0b1000;
307
+ }
308
+
309
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
310
+ uint32_t rk[12], tmp;
311
+ static const uint32_t rcon[] = {
312
+ 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
313
+ 0x00000020, 0x00000040, 0x00000080, 0x0000001B, 0x00000036,
314
+ };
315
+
316
+ rk[0] = vd[i * 4 + H4(0)];
317
+ rk[1] = vd[i * 4 + H4(1)];
318
+ rk[2] = vd[i * 4 + H4(2)];
319
+ rk[3] = vd[i * 4 + H4(3)];
320
+ rk[4] = vs2[i * 4 + H4(0)];
321
+ rk[5] = vs2[i * 4 + H4(1)];
322
+ rk[6] = vs2[i * 4 + H4(2)];
323
+ rk[7] = vs2[i * 4 + H4(3)];
324
+
325
+ if (uimm % 2 == 0) {
326
+ tmp = ror32(rk[7], 8);
327
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(tmp >> 24) & 0xff] << 24) |
328
+ ((uint32_t)AES_sbox[(tmp >> 16) & 0xff] << 16) |
329
+ ((uint32_t)AES_sbox[(tmp >> 8) & 0xff] << 8) |
330
+ ((uint32_t)AES_sbox[(tmp >> 0) & 0xff] << 0))
331
+ ^ rcon[(uimm - 1) / 2];
332
+ } else {
333
+ rk[8] = rk[0] ^ (((uint32_t)AES_sbox[(rk[7] >> 24) & 0xff] << 24) |
334
+ ((uint32_t)AES_sbox[(rk[7] >> 16) & 0xff] << 16) |
335
+ ((uint32_t)AES_sbox[(rk[7] >> 8) & 0xff] << 8) |
336
+ ((uint32_t)AES_sbox[(rk[7] >> 0) & 0xff] << 0));
337
+ }
338
+ rk[9] = rk[1] ^ rk[8];
339
+ rk[10] = rk[2] ^ rk[9];
340
+ rk[11] = rk[3] ^ rk[10];
341
+
342
+ vd[i * 4 + H4(0)] = rk[8];
343
+ vd[i * 4 + H4(1)] = rk[9];
344
+ vd[i * 4 + H4(2)] = rk[10];
345
+ vd[i * 4 + H4(3)] = rk[11];
346
+ }
347
+ env->vstart = 0;
348
+ /* set tail elements to 1s */
349
+ vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
350
+}
351
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
114
index XXXXXXX..XXXXXXX 100644
352
index XXXXXXX..XXXXXXX 100644
115
--- a/target/riscv/insn_trans/trans_privileged.c.inc
353
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
116
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
354
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
117
@@ -XXX,XX +XXX,XX @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a)
355
@@ -XXX,XX +XXX,XX @@ static bool vwsll_vx_check(DisasContext *s, arg_rmrr *a)
118
return true;
356
GEN_OPIVV_WIDEN_TRANS(vwsll_vv, vwsll_vv_check)
119
}
357
GEN_OPIVX_WIDEN_TRANS(vwsll_vx, vwsll_vx_check)
120
358
GEN_OPIVI_WIDEN_TRANS(vwsll_vi, IMM_ZX, vwsll_vx, vwsll_vx_check)
121
+static bool trans_sctrclr(DisasContext *ctx, arg_sctrclr *a)
359
+
122
+{
360
+/*
123
+#ifndef CONFIG_USER_ONLY
361
+ * Zvkned
124
+ if (ctx->cfg_ptr->ext_smctr || ctx->cfg_ptr->ext_ssctr) {
362
+ */
125
+ gen_helper_ctr_clear(tcg_env);
363
+
126
+ return true;
364
+#define ZVKNED_EGS 4
127
+ }
365
+
128
+#endif
366
+#define GEN_V_UNMASKED_TRANS(NAME, CHECK, EGS) \
129
+ return false;
367
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
130
+}
368
+ { \
131
+
369
+ if (CHECK(s, a)) { \
132
static bool trans_uret(DisasContext *ctx, arg_uret *a)
370
+ TCGv_ptr rd_v, rs2_v; \
133
{
371
+ TCGv_i32 desc, egs; \
134
return false;
372
+ uint32_t data = 0; \
373
+ TCGLabel *over = gen_new_label(); \
374
+ \
375
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
376
+ /* save opcode for unwinding in case we throw an exception */ \
377
+ decode_save_opc(s); \
378
+ egs = tcg_constant_i32(EGS); \
379
+ gen_helper_egs_check(egs, cpu_env); \
380
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
381
+ } \
382
+ \
383
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
384
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
385
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
386
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
387
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
388
+ rd_v = tcg_temp_new_ptr(); \
389
+ rs2_v = tcg_temp_new_ptr(); \
390
+ desc = tcg_constant_i32( \
391
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
392
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
393
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
394
+ gen_helper_##NAME(rd_v, rs2_v, cpu_env, desc); \
395
+ mark_vs_dirty(s); \
396
+ gen_set_label(over); \
397
+ return true; \
398
+ } \
399
+ return false; \
400
+ }
401
+
402
+static bool vaes_check_vv(DisasContext *s, arg_rmr *a)
403
+{
404
+ int egw_bytes = ZVKNED_EGS << s->sew;
405
+ return s->cfg_ptr->ext_zvkned == true &&
406
+ require_rvv(s) &&
407
+ vext_check_isa_ill(s) &&
408
+ MAXSZ(s) >= egw_bytes &&
409
+ require_align(a->rd, s->lmul) &&
410
+ require_align(a->rs2, s->lmul) &&
411
+ s->sew == MO_32;
412
+}
413
+
414
+static bool vaes_check_overlap(DisasContext *s, int vd, int vs2)
415
+{
416
+ int8_t op_size = s->lmul <= 0 ? 1 : 1 << s->lmul;
417
+ return !is_overlapped(vd, op_size, vs2, 1);
418
+}
419
+
420
+static bool vaes_check_vs(DisasContext *s, arg_rmr *a)
421
+{
422
+ int egw_bytes = ZVKNED_EGS << s->sew;
423
+ return vaes_check_overlap(s, a->rd, a->rs2) &&
424
+ MAXSZ(s) >= egw_bytes &&
425
+ s->cfg_ptr->ext_zvkned == true &&
426
+ require_rvv(s) &&
427
+ vext_check_isa_ill(s) &&
428
+ require_align(a->rd, s->lmul) &&
429
+ s->sew == MO_32;
430
+}
431
+
432
+GEN_V_UNMASKED_TRANS(vaesef_vv, vaes_check_vv, ZVKNED_EGS)
433
+GEN_V_UNMASKED_TRANS(vaesef_vs, vaes_check_vs, ZVKNED_EGS)
434
+GEN_V_UNMASKED_TRANS(vaesdf_vv, vaes_check_vv, ZVKNED_EGS)
435
+GEN_V_UNMASKED_TRANS(vaesdf_vs, vaes_check_vs, ZVKNED_EGS)
436
+GEN_V_UNMASKED_TRANS(vaesdm_vv, vaes_check_vv, ZVKNED_EGS)
437
+GEN_V_UNMASKED_TRANS(vaesdm_vs, vaes_check_vs, ZVKNED_EGS)
438
+GEN_V_UNMASKED_TRANS(vaesz_vs, vaes_check_vs, ZVKNED_EGS)
439
+GEN_V_UNMASKED_TRANS(vaesem_vv, vaes_check_vv, ZVKNED_EGS)
440
+GEN_V_UNMASKED_TRANS(vaesem_vs, vaes_check_vs, ZVKNED_EGS)
441
+
442
+#define GEN_VI_UNMASKED_TRANS(NAME, CHECK, EGS) \
443
+ static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
444
+ { \
445
+ if (CHECK(s, a)) { \
446
+ TCGv_ptr rd_v, rs2_v; \
447
+ TCGv_i32 uimm_v, desc, egs; \
448
+ uint32_t data = 0; \
449
+ TCGLabel *over = gen_new_label(); \
450
+ \
451
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
452
+ /* save opcode for unwinding in case we throw an exception */ \
453
+ decode_save_opc(s); \
454
+ egs = tcg_constant_i32(EGS); \
455
+ gen_helper_egs_check(egs, cpu_env); \
456
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
457
+ } \
458
+ \
459
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
460
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
461
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
462
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
463
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
464
+ \
465
+ rd_v = tcg_temp_new_ptr(); \
466
+ rs2_v = tcg_temp_new_ptr(); \
467
+ uimm_v = tcg_constant_i32(a->rs1); \
468
+ desc = tcg_constant_i32( \
469
+ simd_desc(s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, data)); \
470
+ tcg_gen_addi_ptr(rd_v, cpu_env, vreg_ofs(s, a->rd)); \
471
+ tcg_gen_addi_ptr(rs2_v, cpu_env, vreg_ofs(s, a->rs2)); \
472
+ gen_helper_##NAME(rd_v, rs2_v, uimm_v, cpu_env, desc); \
473
+ mark_vs_dirty(s); \
474
+ gen_set_label(over); \
475
+ return true; \
476
+ } \
477
+ return false; \
478
+ }
479
+
480
+static bool vaeskf1_check(DisasContext *s, arg_vaeskf1_vi *a)
481
+{
482
+ int egw_bytes = ZVKNED_EGS << s->sew;
483
+ return s->cfg_ptr->ext_zvkned == true &&
484
+ require_rvv(s) &&
485
+ vext_check_isa_ill(s) &&
486
+ MAXSZ(s) >= egw_bytes &&
487
+ s->sew == MO_32 &&
488
+ require_align(a->rd, s->lmul) &&
489
+ require_align(a->rs2, s->lmul);
490
+}
491
+
492
+static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
493
+{
494
+ int egw_bytes = ZVKNED_EGS << s->sew;
495
+ return s->cfg_ptr->ext_zvkned == true &&
496
+ require_rvv(s) &&
497
+ vext_check_isa_ill(s) &&
498
+ MAXSZ(s) >= egw_bytes &&
499
+ s->sew == MO_32 &&
500
+ require_align(a->rd, s->lmul) &&
501
+ require_align(a->rs2, s->lmul);
502
+}
503
+
504
+GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
505
+GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
135
--
506
--
136
2.48.1
507
2.41.0
diff view generated by jsdifflib
1
From: Max Chou <max.chou@sifive.com>
1
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
2
2
3
In prop_vlen_set function, there is an incorrect comparison between
3
This commit adds support for the Zvknh vector-crypto extension, which
4
vlen(bit) and vlenb(byte).
4
consists of the following instructions:
5
This will cause unexpected error when user applies the `vlen=1024` cpu
6
option with a vendor predefined cpu type that the default vlen is
7
1024(vlenb=128).
8
5
9
Fixes: 4f6d036ccc ("target/riscv/cpu.c: remove cpu->cfg.vlen")
6
* vsha2ms.vv
7
* vsha2c[hl].vv
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
14
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
15
[max.chou@sifive.com: Replaced vstart checking by TCG op]
16
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
17
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
18
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
10
Signed-off-by: Max Chou <max.chou@sifive.com>
19
Signed-off-by: Max Chou <max.chou@sifive.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
20
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-ID: <20250124090539.2506448-1-max.chou@sifive.com>
21
[max.chou@sifive.com: Exposed x-zvknha & x-zvknhb properties]
22
[max.chou@sifive.com: Replaced SEW selection to happened during
23
translation]
24
Message-ID: <20230711165917.2629866-11-max.chou@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
25
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
26
---
15
target/riscv/cpu.c | 5 +++--
27
target/riscv/cpu_cfg.h | 2 +
16
1 file changed, 3 insertions(+), 2 deletions(-)
28
target/riscv/helper.h | 6 +
29
target/riscv/insn32.decode | 5 +
30
target/riscv/cpu.c | 13 +-
31
target/riscv/vcrypto_helper.c | 238 +++++++++++++++++++++++
32
target/riscv/insn_trans/trans_rvvk.c.inc | 129 ++++++++++++
33
6 files changed, 390 insertions(+), 3 deletions(-)
17
34
35
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
36
index XXXXXXX..XXXXXXX 100644
37
--- a/target/riscv/cpu_cfg.h
38
+++ b/target/riscv/cpu_cfg.h
39
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
40
bool ext_zvbb;
41
bool ext_zvbc;
42
bool ext_zvkned;
43
+ bool ext_zvknha;
44
+ bool ext_zvknhb;
45
bool ext_zmmul;
46
bool ext_zvfbfmin;
47
bool ext_zvfbfwma;
48
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
49
index XXXXXXX..XXXXXXX 100644
50
--- a/target/riscv/helper.h
51
+++ b/target/riscv/helper.h
52
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
53
DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
54
DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
55
DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
56
+
57
+DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
58
+DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
59
+DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
60
+DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
61
+DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
62
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
63
index XXXXXXX..XXXXXXX 100644
64
--- a/target/riscv/insn32.decode
65
+++ b/target/riscv/insn32.decode
66
@@ -XXX,XX +XXX,XX @@ vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
67
vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
68
vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
69
vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
70
+
71
+# *** Zvknh vector crypto extension ***
72
+vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
73
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
74
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
18
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
75
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
19
index XXXXXXX..XXXXXXX 100644
76
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu.c
77
--- a/target/riscv/cpu.c
21
+++ b/target/riscv/cpu.c
78
+++ b/target/riscv/cpu.c
22
@@ -XXX,XX +XXX,XX @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
79
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
23
void *opaque, Error **errp)
80
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
24
{
81
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
25
RISCVCPU *cpu = RISCV_CPU(obj);
82
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
26
+ uint16_t cpu_vlen = cpu->cfg.vlenb << 3;
83
+ ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
27
uint16_t value;
84
+ ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
28
85
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
29
if (!visit_type_uint16(v, name, &value, errp)) {
86
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
30
@@ -XXX,XX +XXX,XX @@ static void prop_vlen_set(Object *obj, Visitor *v, const char *name,
87
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
88
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
89
* In principle Zve*x would also suffice here, were they supported
90
* in qemu
91
*/
92
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
93
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
94
+ !cpu->cfg.ext_zve32f) {
95
error_setg(errp,
96
"Vector crypto extensions require V or Zve* extensions");
31
return;
97
return;
32
}
98
}
33
99
34
- if (value != cpu->cfg.vlenb && riscv_cpu_is_vendor(obj)) {
100
- if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
35
+ if (value != cpu_vlen && riscv_cpu_is_vendor(obj)) {
101
- error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
36
cpu_set_prop_err(cpu, name, errp);
102
+ if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
37
error_append_hint(errp, "Current '%s' val: %u\n",
103
+ error_setg(
38
- name, cpu->cfg.vlenb << 3);
104
+ errp,
39
+ name, cpu_vlen);
105
+ "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
40
return;
106
return;
41
}
107
}
42
108
109
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
110
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
111
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
112
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
113
+ DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
114
+ DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
115
116
DEFINE_PROP_END_OF_LIST(),
117
};
118
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
119
index XXXXXXX..XXXXXXX 100644
120
--- a/target/riscv/vcrypto_helper.c
121
+++ b/target/riscv/vcrypto_helper.c
122
@@ -XXX,XX +XXX,XX @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
123
/* set tail elements to 1s */
124
vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
125
}
126
+
127
+static inline uint32_t sig0_sha256(uint32_t x)
128
+{
129
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
130
+}
131
+
132
+static inline uint32_t sig1_sha256(uint32_t x)
133
+{
134
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
135
+}
136
+
137
+static inline uint64_t sig0_sha512(uint64_t x)
138
+{
139
+ return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
140
+}
141
+
142
+static inline uint64_t sig1_sha512(uint64_t x)
143
+{
144
+ return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
145
+}
146
+
147
+static inline void vsha2ms_e32(uint32_t *vd, uint32_t *vs1, uint32_t *vs2)
148
+{
149
+ uint32_t res[4];
150
+ res[0] = sig1_sha256(vs1[H4(2)]) + vs2[H4(1)] + sig0_sha256(vd[H4(1)]) +
151
+ vd[H4(0)];
152
+ res[1] = sig1_sha256(vs1[H4(3)]) + vs2[H4(2)] + sig0_sha256(vd[H4(2)]) +
153
+ vd[H4(1)];
154
+ res[2] =
155
+ sig1_sha256(res[0]) + vs2[H4(3)] + sig0_sha256(vd[H4(3)]) + vd[H4(2)];
156
+ res[3] =
157
+ sig1_sha256(res[1]) + vs1[H4(0)] + sig0_sha256(vs2[H4(0)]) + vd[H4(3)];
158
+ vd[H4(3)] = res[3];
159
+ vd[H4(2)] = res[2];
160
+ vd[H4(1)] = res[1];
161
+ vd[H4(0)] = res[0];
162
+}
163
+
164
+static inline void vsha2ms_e64(uint64_t *vd, uint64_t *vs1, uint64_t *vs2)
165
+{
166
+ uint64_t res[4];
167
+ res[0] = sig1_sha512(vs1[2]) + vs2[1] + sig0_sha512(vd[1]) + vd[0];
168
+ res[1] = sig1_sha512(vs1[3]) + vs2[2] + sig0_sha512(vd[2]) + vd[1];
169
+ res[2] = sig1_sha512(res[0]) + vs2[3] + sig0_sha512(vd[3]) + vd[2];
170
+ res[3] = sig1_sha512(res[1]) + vs1[0] + sig0_sha512(vs2[0]) + vd[3];
171
+ vd[3] = res[3];
172
+ vd[2] = res[2];
173
+ vd[1] = res[1];
174
+ vd[0] = res[0];
175
+}
176
+
177
+void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
178
+ uint32_t desc)
179
+{
180
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
181
+ uint32_t esz = sew == MO_32 ? 4 : 8;
182
+ uint32_t total_elems;
183
+ uint32_t vta = vext_vta(desc);
184
+
185
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
186
+ if (sew == MO_32) {
187
+ vsha2ms_e32(((uint32_t *)vd) + i * 4, ((uint32_t *)vs1) + i * 4,
188
+ ((uint32_t *)vs2) + i * 4);
189
+ } else {
190
+ /* If not 32 then SEW should be 64 */
191
+ vsha2ms_e64(((uint64_t *)vd) + i * 4, ((uint64_t *)vs1) + i * 4,
192
+ ((uint64_t *)vs2) + i * 4);
193
+ }
194
+ }
195
+ /* set tail elements to 1s */
196
+ total_elems = vext_get_total_elems(env, desc, esz);
197
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
198
+ env->vstart = 0;
199
+}
200
+
201
+static inline uint64_t sum0_64(uint64_t x)
202
+{
203
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
204
+}
205
+
206
+static inline uint32_t sum0_32(uint32_t x)
207
+{
208
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
209
+}
210
+
211
+static inline uint64_t sum1_64(uint64_t x)
212
+{
213
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
214
+}
215
+
216
+static inline uint32_t sum1_32(uint32_t x)
217
+{
218
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
219
+}
220
+
221
+#define ch(x, y, z) ((x & y) ^ ((~x) & z))
222
+
223
+#define maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
224
+
225
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
226
+{
227
+ uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
228
+ uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
229
+ uint64_t W0 = vs1[0], W1 = vs1[1];
230
+ uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
231
+ uint64_t T2 = sum0_64(a) + maj(a, b, c);
232
+
233
+ h = g;
234
+ g = f;
235
+ f = e;
236
+ e = d + T1;
237
+ d = c;
238
+ c = b;
239
+ b = a;
240
+ a = T1 + T2;
241
+
242
+ T1 = h + sum1_64(e) + ch(e, f, g) + W1;
243
+ T2 = sum0_64(a) + maj(a, b, c);
244
+ h = g;
245
+ g = f;
246
+ f = e;
247
+ e = d + T1;
248
+ d = c;
249
+ c = b;
250
+ b = a;
251
+ a = T1 + T2;
252
+
253
+ vd[0] = f;
254
+ vd[1] = e;
255
+ vd[2] = b;
256
+ vd[3] = a;
257
+}
258
+
259
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
260
+{
261
+ uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
262
+ uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
263
+ uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
264
+ uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
265
+ uint32_t T2 = sum0_32(a) + maj(a, b, c);
266
+
267
+ h = g;
268
+ g = f;
269
+ f = e;
270
+ e = d + T1;
271
+ d = c;
272
+ c = b;
273
+ b = a;
274
+ a = T1 + T2;
275
+
276
+ T1 = h + sum1_32(e) + ch(e, f, g) + W1;
277
+ T2 = sum0_32(a) + maj(a, b, c);
278
+ h = g;
279
+ g = f;
280
+ f = e;
281
+ e = d + T1;
282
+ d = c;
283
+ c = b;
284
+ b = a;
285
+ a = T1 + T2;
286
+
287
+ vd[H4(0)] = f;
288
+ vd[H4(1)] = e;
289
+ vd[H4(2)] = b;
290
+ vd[H4(3)] = a;
291
+}
292
+
293
+void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
294
+ uint32_t desc)
295
+{
296
+ const uint32_t esz = 4;
297
+ uint32_t total_elems;
298
+ uint32_t vta = vext_vta(desc);
299
+
300
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
301
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
302
+ ((uint32_t *)vs1) + 4 * i + 2);
303
+ }
304
+
305
+ /* set tail elements to 1s */
306
+ total_elems = vext_get_total_elems(env, desc, esz);
307
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
308
+ env->vstart = 0;
309
+}
310
+
311
+void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
312
+ uint32_t desc)
313
+{
314
+ const uint32_t esz = 8;
315
+ uint32_t total_elems;
316
+ uint32_t vta = vext_vta(desc);
317
+
318
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
319
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
320
+ ((uint64_t *)vs1) + 4 * i + 2);
321
+ }
322
+
323
+ /* set tail elements to 1s */
324
+ total_elems = vext_get_total_elems(env, desc, esz);
325
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
326
+ env->vstart = 0;
327
+}
328
+
329
+void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
330
+ uint32_t desc)
331
+{
332
+ const uint32_t esz = 4;
333
+ uint32_t total_elems;
334
+ uint32_t vta = vext_vta(desc);
335
+
336
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
337
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
338
+ (((uint32_t *)vs1) + 4 * i));
339
+ }
340
+
341
+ /* set tail elements to 1s */
342
+ total_elems = vext_get_total_elems(env, desc, esz);
343
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
344
+ env->vstart = 0;
345
+}
346
+
347
+void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
348
+ uint32_t desc)
349
+{
350
+ uint32_t esz = 8;
351
+ uint32_t total_elems;
352
+ uint32_t vta = vext_vta(desc);
353
+
354
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
355
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
356
+ (((uint64_t *)vs1) + 4 * i));
357
+ }
358
+
359
+ /* set tail elements to 1s */
360
+ total_elems = vext_get_total_elems(env, desc, esz);
361
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
362
+ env->vstart = 0;
363
+}
364
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
365
index XXXXXXX..XXXXXXX 100644
366
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
367
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
368
@@ -XXX,XX +XXX,XX @@ static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
369
370
GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
371
GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
372
+
373
+/*
374
+ * Zvknh
375
+ */
376
+
377
+#define ZVKNH_EGS 4
378
+
379
+#define GEN_VV_UNMASKED_TRANS(NAME, CHECK, EGS) \
380
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
381
+ { \
382
+ if (CHECK(s, a)) { \
383
+ uint32_t data = 0; \
384
+ TCGLabel *over = gen_new_label(); \
385
+ TCGv_i32 egs; \
386
+ \
387
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) { \
388
+ /* save opcode for unwinding in case we throw an exception */ \
389
+ decode_save_opc(s); \
390
+ egs = tcg_constant_i32(EGS); \
391
+ gen_helper_egs_check(egs, cpu_env); \
392
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
393
+ } \
394
+ \
395
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
396
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
397
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
398
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
399
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
400
+ \
401
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
402
+ vreg_ofs(s, a->rs2), cpu_env, \
403
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
404
+ data, gen_helper_##NAME); \
405
+ \
406
+ mark_vs_dirty(s); \
407
+ gen_set_label(over); \
408
+ return true; \
409
+ } \
410
+ return false; \
411
+ }
412
+
413
+static bool vsha_check_sew(DisasContext *s)
414
+{
415
+ return (s->cfg_ptr->ext_zvknha == true && s->sew == MO_32) ||
416
+ (s->cfg_ptr->ext_zvknhb == true &&
417
+ (s->sew == MO_32 || s->sew == MO_64));
418
+}
419
+
420
+static bool vsha_check(DisasContext *s, arg_rmrr *a)
421
+{
422
+ int egw_bytes = ZVKNH_EGS << s->sew;
423
+ int mult = 1 << MAX(s->lmul, 0);
424
+ return opivv_check(s, a) &&
425
+ vsha_check_sew(s) &&
426
+ MAXSZ(s) >= egw_bytes &&
427
+ !is_overlapped(a->rd, mult, a->rs1, mult) &&
428
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
429
+ s->lmul >= 0;
430
+}
431
+
432
+GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, ZVKNH_EGS)
433
+
434
+static bool trans_vsha2cl_vv(DisasContext *s, arg_rmrr *a)
435
+{
436
+ if (vsha_check(s, a)) {
437
+ uint32_t data = 0;
438
+ TCGLabel *over = gen_new_label();
439
+ TCGv_i32 egs;
440
+
441
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
442
+ /* save opcode for unwinding in case we throw an exception */
443
+ decode_save_opc(s);
444
+ egs = tcg_constant_i32(ZVKNH_EGS);
445
+ gen_helper_egs_check(egs, cpu_env);
446
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
447
+ }
448
+
449
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
450
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
451
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
452
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
453
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
454
+
455
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
456
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
457
+ s->cfg_ptr->vlen / 8, data,
458
+ s->sew == MO_32 ?
459
+ gen_helper_vsha2cl32_vv : gen_helper_vsha2cl64_vv);
460
+
461
+ mark_vs_dirty(s);
462
+ gen_set_label(over);
463
+ return true;
464
+ }
465
+ return false;
466
+}
467
+
468
+static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
469
+{
470
+ if (vsha_check(s, a)) {
471
+ uint32_t data = 0;
472
+ TCGLabel *over = gen_new_label();
473
+ TCGv_i32 egs;
474
+
475
+ if (!s->vstart_eq_zero || !s->vl_eq_vlmax) {
476
+ /* save opcode for unwinding in case we throw an exception */
477
+ decode_save_opc(s);
478
+ egs = tcg_constant_i32(ZVKNH_EGS);
479
+ gen_helper_egs_check(egs, cpu_env);
480
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
481
+ }
482
+
483
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
484
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
485
+ data = FIELD_DP32(data, VDATA, VTA, s->vta);
486
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
487
+ data = FIELD_DP32(data, VDATA, VMA, s->vma);
488
+
489
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
490
+ vreg_ofs(s, a->rs2), cpu_env, s->cfg_ptr->vlen / 8,
491
+ s->cfg_ptr->vlen / 8, data,
492
+ s->sew == MO_32 ?
493
+ gen_helper_vsha2ch32_vv : gen_helper_vsha2ch64_vv);
494
+
495
+ mark_vs_dirty(s);
496
+ gen_set_label(over);
497
+ return true;
498
+ }
499
+ return false;
500
+}
43
--
501
--
44
2.48.1
502
2.41.0
diff view generated by jsdifflib
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
2
2
3
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
3
This commit adds support for the Zvksh vector-crypto extension, which
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
consists of the following instructions:
5
Reviewed-by: Jason Chien <jason.chien@sifive.com>
5
6
Message-ID: <20250205-b4-ctr_upstream_v6-v6-1-439d8e06c8ef@rivosinc.com>
6
* vsm3me.vv
7
* vsm3c.vi
8
9
Translation functions are defined in
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
`target/riscv/vcrypto_helper.c`.
12
13
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
14
[max.chou@sifive.com: Replaced vstart checking by TCG op]
15
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
16
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
[max.chou@sifive.com: Exposed x-zvksh property]
20
Message-ID: <20230711165917.2629866-12-max.chou@sifive.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
22
---
9
target/riscv/insn32.decode | 1 -
23
target/riscv/cpu_cfg.h | 1 +
10
target/riscv/insn_trans/trans_privileged.c.inc | 5 -----
24
target/riscv/helper.h | 3 +
11
2 files changed, 6 deletions(-)
25
target/riscv/insn32.decode | 4 +
12
26
target/riscv/cpu.c | 6 +-
27
target/riscv/vcrypto_helper.c | 134 +++++++++++++++++++++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 31 ++++++
29
6 files changed, 177 insertions(+), 2 deletions(-)
30
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu_cfg.h
34
+++ b/target/riscv/cpu_cfg.h
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
36
bool ext_zvkned;
37
bool ext_zvknha;
38
bool ext_zvknhb;
39
+ bool ext_zvksh;
40
bool ext_zmmul;
41
bool ext_zvfbfmin;
42
bool ext_zvfbfwma;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2ch32_vv, void, ptr, ptr, ptr, env, i32)
48
DEF_HELPER_5(vsha2ch64_vv, void, ptr, ptr, ptr, env, i32)
49
DEF_HELPER_5(vsha2cl32_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
53
+DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
13
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
54
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
14
index XXXXXXX..XXXXXXX 100644
55
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/insn32.decode
56
--- a/target/riscv/insn32.decode
16
+++ b/target/riscv/insn32.decode
57
+++ b/target/riscv/insn32.decode
17
@@ -XXX,XX +XXX,XX @@ sret 0001000 00010 00000 000 00000 1110011
58
@@ -XXX,XX +XXX,XX @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
18
mret 0011000 00010 00000 000 00000 1110011
59
vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
19
wfi 0001000 00101 00000 000 00000 1110011
60
vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
20
sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
61
vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
21
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
62
+
22
63
+# *** Zvksh vector crypto extension ***
23
# *** NMI ***
64
+vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
24
mnret 0111000 00010 00000 000 00000 1110011
65
+vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
25
diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc
66
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
26
index XXXXXXX..XXXXXXX 100644
67
index XXXXXXX..XXXXXXX 100644
27
--- a/target/riscv/insn_trans/trans_privileged.c.inc
68
--- a/target/riscv/cpu.c
28
+++ b/target/riscv/insn_trans/trans_privileged.c.inc
69
+++ b/target/riscv/cpu.c
29
@@ -XXX,XX +XXX,XX @@ static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a)
70
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
30
#endif
71
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
72
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
73
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
74
+ ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
75
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
76
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
77
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
78
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
79
* In principle Zve*x would also suffice here, were they supported
80
* in qemu
81
*/
82
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
83
- !cpu->cfg.ext_zve32f) {
84
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
85
+ cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
86
error_setg(errp,
87
"Vector crypto extensions require V or Zve* extensions");
88
return;
89
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
90
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
91
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
92
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
93
+ DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
94
95
DEFINE_PROP_END_OF_LIST(),
96
};
97
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
98
index XXXXXXX..XXXXXXX 100644
99
--- a/target/riscv/vcrypto_helper.c
100
+++ b/target/riscv/vcrypto_helper.c
101
@@ -XXX,XX +XXX,XX @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
102
vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
103
env->vstart = 0;
104
}
105
+
106
+static inline uint32_t p1(uint32_t x)
107
+{
108
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
109
+}
110
+
111
+static inline uint32_t zvksh_w(uint32_t m16, uint32_t m9, uint32_t m3,
112
+ uint32_t m13, uint32_t m6)
113
+{
114
+ return p1(m16 ^ m9 ^ rol32(m3, 15)) ^ rol32(m13, 7) ^ m6;
115
+}
116
+
117
+void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
118
+ CPURISCVState *env, uint32_t desc)
119
+{
120
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
121
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
122
+ uint32_t vta = vext_vta(desc);
123
+ uint32_t *vd = vd_vptr;
124
+ uint32_t *vs1 = vs1_vptr;
125
+ uint32_t *vs2 = vs2_vptr;
126
+
127
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
128
+ uint32_t w[24];
129
+ for (int j = 0; j < 8; j++) {
130
+ w[j] = bswap32(vs1[H4((i * 8) + j)]);
131
+ w[j + 8] = bswap32(vs2[H4((i * 8) + j)]);
132
+ }
133
+ for (int j = 0; j < 8; j++) {
134
+ w[j + 16] =
135
+ zvksh_w(w[j], w[j + 7], w[j + 13], w[j + 3], w[j + 10]);
136
+ }
137
+ for (int j = 0; j < 8; j++) {
138
+ vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
139
+ }
140
+ }
141
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
142
+ env->vstart = 0;
143
+}
144
+
145
+static inline uint32_t ff1(uint32_t x, uint32_t y, uint32_t z)
146
+{
147
+ return x ^ y ^ z;
148
+}
149
+
150
+static inline uint32_t ff2(uint32_t x, uint32_t y, uint32_t z)
151
+{
152
+ return (x & y) | (x & z) | (y & z);
153
+}
154
+
155
+static inline uint32_t ff_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
156
+{
157
+ return (j <= 15) ? ff1(x, y, z) : ff2(x, y, z);
158
+}
159
+
160
+static inline uint32_t gg1(uint32_t x, uint32_t y, uint32_t z)
161
+{
162
+ return x ^ y ^ z;
163
+}
164
+
165
+static inline uint32_t gg2(uint32_t x, uint32_t y, uint32_t z)
166
+{
167
+ return (x & y) | (~x & z);
168
+}
169
+
170
+static inline uint32_t gg_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
171
+{
172
+ return (j <= 15) ? gg1(x, y, z) : gg2(x, y, z);
173
+}
174
+
175
+static inline uint32_t t_j(uint32_t j)
176
+{
177
+ return (j <= 15) ? 0x79cc4519 : 0x7a879d8a;
178
+}
179
+
180
+static inline uint32_t p_0(uint32_t x)
181
+{
182
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
183
+}
184
+
185
+static void sm3c(uint32_t *vd, uint32_t *vs1, uint32_t *vs2, uint32_t uimm)
186
+{
187
+ uint32_t x0, x1;
188
+ uint32_t j;
189
+ uint32_t ss1, ss2, tt1, tt2;
190
+ x0 = vs2[0] ^ vs2[4];
191
+ x1 = vs2[1] ^ vs2[5];
192
+ j = 2 * uimm;
193
+ ss1 = rol32(rol32(vs1[0], 12) + vs1[4] + rol32(t_j(j), j % 32), 7);
194
+ ss2 = ss1 ^ rol32(vs1[0], 12);
195
+ tt1 = ff_j(vs1[0], vs1[1], vs1[2], j) + vs1[3] + ss2 + x0;
196
+ tt2 = gg_j(vs1[4], vs1[5], vs1[6], j) + vs1[7] + ss1 + vs2[0];
197
+ vs1[3] = vs1[2];
198
+ vd[3] = rol32(vs1[1], 9);
199
+ vs1[1] = vs1[0];
200
+ vd[1] = tt1;
201
+ vs1[7] = vs1[6];
202
+ vd[7] = rol32(vs1[5], 19);
203
+ vs1[5] = vs1[4];
204
+ vd[5] = p_0(tt2);
205
+ j = 2 * uimm + 1;
206
+ ss1 = rol32(rol32(vd[1], 12) + vd[5] + rol32(t_j(j), j % 32), 7);
207
+ ss2 = ss1 ^ rol32(vd[1], 12);
208
+ tt1 = ff_j(vd[1], vs1[1], vd[3], j) + vs1[3] + ss2 + x1;
209
+ tt2 = gg_j(vd[5], vs1[5], vd[7], j) + vs1[7] + ss1 + vs2[1];
210
+ vd[2] = rol32(vs1[1], 9);
211
+ vd[0] = tt1;
212
+ vd[6] = rol32(vs1[5], 19);
213
+ vd[4] = p_0(tt2);
214
+}
215
+
216
+void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
217
+ CPURISCVState *env, uint32_t desc)
218
+{
219
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
220
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
221
+ uint32_t vta = vext_vta(desc);
222
+ uint32_t *vd = vd_vptr;
223
+ uint32_t *vs2 = vs2_vptr;
224
+ uint32_t v1[8], v2[8], v3[8];
225
+
226
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
227
+ for (int k = 0; k < 8; k++) {
228
+ v2[k] = bswap32(vd[H4(i * 8 + k)]);
229
+ v3[k] = bswap32(vs2[H4(i * 8 + k)]);
230
+ }
231
+ sm3c(v1, v2, v3, uimm);
232
+ for (int k = 0; k < 8; k++) {
233
+ vd[i * 8 + k] = bswap32(v1[H4(k)]);
234
+ }
235
+ }
236
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
237
+ env->vstart = 0;
238
+}
239
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
240
index XXXXXXX..XXXXXXX 100644
241
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
242
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
243
@@ -XXX,XX +XXX,XX @@ static bool trans_vsha2ch_vv(DisasContext *s, arg_rmrr *a)
244
}
31
return false;
245
return false;
32
}
246
}
33
-
247
+
34
-static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
248
+/*
35
-{
249
+ * Zvksh
36
- return false;
250
+ */
37
-}
251
+
252
+#define ZVKSH_EGS 8
253
+
254
+static inline bool vsm3_check(DisasContext *s, arg_rmrr *a)
255
+{
256
+ int egw_bytes = ZVKSH_EGS << s->sew;
257
+ int mult = 1 << MAX(s->lmul, 0);
258
+ return s->cfg_ptr->ext_zvksh == true &&
259
+ require_rvv(s) &&
260
+ vext_check_isa_ill(s) &&
261
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
262
+ MAXSZ(s) >= egw_bytes &&
263
+ s->sew == MO_32;
264
+}
265
+
266
+static inline bool vsm3me_check(DisasContext *s, arg_rmrr *a)
267
+{
268
+ return vsm3_check(s, a) && vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
269
+}
270
+
271
+static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
272
+{
273
+ return vsm3_check(s, a) && vext_check_ss(s, a->rd, a->rs2, a->vm);
274
+}
275
+
276
+GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
277
+GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
38
--
278
--
39
2.48.1
279
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
2
2
3
Add RVA23U64 as described in [1]. Add it as a child of RVA22U64 since
3
This commit adds support for the Zvkg vector-crypto extension, which
4
all RVA22U64 mandatory extensions are also present in RVA23U64. What's
4
consists of the following instructions:
5
left then is to list the mandatory extensions that are RVA23 only.
5
6
6
* vgmul.vv
7
A new "rva23u64" CPU is also added.
7
* vghsh.vv
8
8
9
[1] https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc
9
Translation functions are defined in
10
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
11
`target/riscv/vcrypto_helper.c`.
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
12
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
14
Message-ID: <20250115184316.2344583-6-dbarboza@ventanamicro.com>
14
[max.chou@sifive.com: Replaced vstart checking by TCG op]
15
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
16
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
17
Signed-off-by: Max Chou <max.chou@sifive.com>
18
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
19
[max.chou@sifive.com: Exposed x-zvkg property]
20
[max.chou@sifive.com: Replaced uint by int for cross win32 build]
21
Message-ID: <20230711165917.2629866-13-max.chou@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
22
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
23
---
17
target/riscv/cpu-qom.h | 1 +
24
target/riscv/cpu_cfg.h | 1 +
18
target/riscv/cpu.c | 33 +++++++++++++++++++++++++++++++++
25
target/riscv/helper.h | 3 +
19
2 files changed, 34 insertions(+)
26
target/riscv/insn32.decode | 4 ++
20
27
target/riscv/cpu.c | 6 +-
21
diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h
28
target/riscv/vcrypto_helper.c | 72 ++++++++++++++++++++++++
22
index XXXXXXX..XXXXXXX 100644
29
target/riscv/insn_trans/trans_rvvk.c.inc | 30 ++++++++++
23
--- a/target/riscv/cpu-qom.h
30
6 files changed, 114 insertions(+), 2 deletions(-)
24
+++ b/target/riscv/cpu-qom.h
31
25
@@ -XXX,XX +XXX,XX @@
32
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
26
#define TYPE_RISCV_CPU_RV64E RISCV_CPU_TYPE_NAME("rv64e")
33
index XXXXXXX..XXXXXXX 100644
27
#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64")
34
--- a/target/riscv/cpu_cfg.h
28
#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64")
35
+++ b/target/riscv/cpu_cfg.h
29
+#define TYPE_RISCV_CPU_RVA23U64 RISCV_CPU_TYPE_NAME("rva23u64")
36
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
30
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
37
bool ext_zve64d;
31
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
38
bool ext_zvbb;
32
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
39
bool ext_zvbc;
40
+ bool ext_zvkg;
41
bool ext_zvkned;
42
bool ext_zvknha;
43
bool ext_zvknhb;
44
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
45
index XXXXXXX..XXXXXXX 100644
46
--- a/target/riscv/helper.h
47
+++ b/target/riscv/helper.h
48
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsha2cl64_vv, void, ptr, ptr, ptr, env, i32)
49
50
DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
51
DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
52
+
53
+DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
54
+DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/insn32.decode
58
+++ b/target/riscv/insn32.decode
59
@@ -XXX,XX +XXX,XX @@ vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
# *** Zvksh vector crypto extension ***
61
vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
63
+
64
+# *** Zvkg vector crypto extension ***
65
+vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
+vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
33
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
67
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
34
index XXXXXXX..XXXXXXX 100644
68
index XXXXXXX..XXXXXXX 100644
35
--- a/target/riscv/cpu.c
69
--- a/target/riscv/cpu.c
36
+++ b/target/riscv/cpu.c
70
+++ b/target/riscv/cpu.c
37
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA22S64 = {
71
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
38
}
72
ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
39
};
73
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
40
74
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
75
+ ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
76
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
77
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
78
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
79
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
80
* In principle Zve*x would also suffice here, were they supported
81
* in qemu
82
*/
83
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
84
- cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
85
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
86
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
87
error_setg(errp,
88
"Vector crypto extensions require V or Zve* extensions");
89
return;
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
91
/* Vector cryptography extensions */
92
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
93
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
94
+ DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false),
95
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
96
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
97
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
99
index XXXXXXX..XXXXXXX 100644
100
--- a/target/riscv/vcrypto_helper.c
101
+++ b/target/riscv/vcrypto_helper.c
102
@@ -XXX,XX +XXX,XX @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
103
vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
104
env->vstart = 0;
105
}
106
+
107
+void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
108
+ CPURISCVState *env, uint32_t desc)
109
+{
110
+ uint64_t *vd = vd_vptr;
111
+ uint64_t *vs1 = vs1_vptr;
112
+ uint64_t *vs2 = vs2_vptr;
113
+ uint32_t vta = vext_vta(desc);
114
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
115
+
116
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
117
+ uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
118
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
119
+ uint64_t X[2] = {vs1[i * 2 + 0], vs1[i * 2 + 1]};
120
+ uint64_t Z[2] = {0, 0};
121
+
122
+ uint64_t S[2] = {brev8(Y[0] ^ X[0]), brev8(Y[1] ^ X[1])};
123
+
124
+ for (int j = 0; j < 128; j++) {
125
+ if ((S[j / 64] >> (j % 64)) & 1) {
126
+ Z[0] ^= H[0];
127
+ Z[1] ^= H[1];
128
+ }
129
+ bool reduce = ((H[1] >> 63) & 1);
130
+ H[1] = H[1] << 1 | H[0] >> 63;
131
+ H[0] = H[0] << 1;
132
+ if (reduce) {
133
+ H[0] ^= 0x87;
134
+ }
135
+ }
136
+
137
+ vd[i * 2 + 0] = brev8(Z[0]);
138
+ vd[i * 2 + 1] = brev8(Z[1]);
139
+ }
140
+ /* set tail elements to 1s */
141
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
142
+ env->vstart = 0;
143
+}
144
+
145
+void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
146
+ uint32_t desc)
147
+{
148
+ uint64_t *vd = vd_vptr;
149
+ uint64_t *vs2 = vs2_vptr;
150
+ uint32_t vta = vext_vta(desc);
151
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
152
+
153
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
154
+ uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
155
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
156
+ uint64_t Z[2] = {0, 0};
157
+
158
+ for (int j = 0; j < 128; j++) {
159
+ if ((Y[j / 64] >> (j % 64)) & 1) {
160
+ Z[0] ^= H[0];
161
+ Z[1] ^= H[1];
162
+ }
163
+ bool reduce = ((H[1] >> 63) & 1);
164
+ H[1] = H[1] << 1 | H[0] >> 63;
165
+ H[0] = H[0] << 1;
166
+ if (reduce) {
167
+ H[0] ^= 0x87;
168
+ }
169
+ }
170
+
171
+ vd[i * 2 + 0] = brev8(Z[0]);
172
+ vd[i * 2 + 1] = brev8(Z[1]);
173
+ }
174
+ /* set tail elements to 1s */
175
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
176
+ env->vstart = 0;
177
+}
178
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
179
index XXXXXXX..XXXXXXX 100644
180
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
181
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
182
@@ -XXX,XX +XXX,XX @@ static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
183
184
GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
185
GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
186
+
41
+/*
187
+/*
42
+ * All mandatory extensions from RVA22U64 are present
188
+ * Zvkg
43
+ * in RVA23U64 so set RVA22 as a parent. We need to
44
+ * declare just the newly added mandatory extensions.
45
+ */
189
+ */
46
+static RISCVCPUProfile RVA23U64 = {
190
+
47
+ .u_parent = &RVA22U64,
191
+#define ZVKG_EGS 4
48
+ .s_parent = NULL,
192
+
49
+ .name = "rva23u64",
193
+static bool vgmul_check(DisasContext *s, arg_rmr *a)
50
+ .misa_ext = RVV,
194
+{
51
+ .priv_spec = RISCV_PROFILE_ATTR_UNUSED,
195
+ int egw_bytes = ZVKG_EGS << s->sew;
52
+ .satp_mode = RISCV_PROFILE_ATTR_UNUSED,
196
+ return s->cfg_ptr->ext_zvkg == true &&
53
+ .ext_offsets = {
197
+ vext_check_isa_ill(s) &&
54
+ CPU_CFG_OFFSET(ext_zvfhmin), CPU_CFG_OFFSET(ext_zvbb),
198
+ require_rvv(s) &&
55
+ CPU_CFG_OFFSET(ext_zvkt), CPU_CFG_OFFSET(ext_zihintntl),
199
+ MAXSZ(s) >= egw_bytes &&
56
+ CPU_CFG_OFFSET(ext_zicond), CPU_CFG_OFFSET(ext_zimop),
200
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
57
+ CPU_CFG_OFFSET(ext_zcmop), CPU_CFG_OFFSET(ext_zcb),
201
+ s->sew == MO_32;
58
+ CPU_CFG_OFFSET(ext_zfa), CPU_CFG_OFFSET(ext_zawrs),
202
+}
59
+ CPU_CFG_OFFSET(ext_supm),
203
+
60
+
204
+GEN_V_UNMASKED_TRANS(vgmul_vv, vgmul_check, ZVKG_EGS)
61
+ RISCV_PROFILE_EXT_LIST_END
205
+
62
+ }
206
+static bool vghsh_check(DisasContext *s, arg_rmrr *a)
63
+};
207
+{
64
+
208
+ int egw_bytes = ZVKG_EGS << s->sew;
65
RISCVCPUProfile *riscv_profiles[] = {
209
+ return s->cfg_ptr->ext_zvkg == true &&
66
&RVA22U64,
210
+ opivv_check(s, a) &&
67
&RVA22S64,
211
+ MAXSZ(s) >= egw_bytes &&
68
+ &RVA23U64,
212
+ s->sew == MO_32;
69
NULL,
213
+}
70
};
214
+
71
215
+GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
72
@@ -XXX,XX +XXX,XX @@ static void rva22s64_profile_cpu_init(Object *obj)
73
74
RVA22S64.enabled = true;
75
}
76
+
77
+static void rva23u64_profile_cpu_init(Object *obj)
78
+{
79
+ rv64i_bare_cpu_init(obj);
80
+
81
+ RVA23U64.enabled = true;
82
+}
83
#endif
84
85
static const gchar *riscv_gdb_arch_name(CPUState *cs)
86
@@ -XXX,XX +XXX,XX @@ static const TypeInfo riscv_cpu_type_infos[] = {
87
DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64E, MXL_RV64, rv64e_bare_cpu_init),
88
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, MXL_RV64, rva22u64_profile_cpu_init),
89
DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, MXL_RV64, rva22s64_profile_cpu_init),
90
+ DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA23U64, MXL_RV64, rva23u64_profile_cpu_init),
91
#endif /* TARGET_RISCV64 */
92
};
93
94
--
216
--
95
2.48.1
217
2.41.0
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
RISCV_IOMMU_REG_IOCOUNTINH is done by riscv_iommu_process_iocntinh_cy(),
3
Allows sharing of sm4_subword between different targets.
4
which is called during riscv_iommu_mmio_write() callback via a new
5
riscv_iommu_pricess_hpm_writes() helper.
6
4
7
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
5
Signed-off-by: Max Chou <max.chou@sifive.com>
8
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
9
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
10
Message-ID: <20250224190826.1858473-7-dbarboza@ventanamicro.com>
8
Signed-off-by: Max Chou <max.chou@sifive.com>
9
Message-ID: <20230711165917.2629866-14-max.chou@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
11
---
13
hw/riscv/riscv-iommu-hpm.h | 1 +
12
include/crypto/sm4.h | 8 ++++++++
14
hw/riscv/riscv-iommu-hpm.c | 60 ++++++++++++++++++++++++++++++++++++++
13
target/arm/tcg/crypto_helper.c | 10 ++--------
15
hw/riscv/riscv-iommu.c | 38 ++++++++++++++++++++++++
14
2 files changed, 10 insertions(+), 8 deletions(-)
16
3 files changed, 99 insertions(+)
17
15
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
16
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu-hpm.h
18
--- a/include/crypto/sm4.h
21
+++ b/hw/riscv/riscv-iommu-hpm.h
19
+++ b/include/crypto/sm4.h
22
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
20
@@ -XXX,XX +XXX,XX @@
23
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
21
24
unsigned event_id);
22
extern const uint8_t sm4_sbox[256];
25
void riscv_iommu_hpm_timer_cb(void *priv);
23
26
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
24
+static inline uint32_t sm4_subword(uint32_t word)
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu-hpm.c
32
+++ b/hw/riscv/riscv-iommu-hpm.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_timer_cb(void *priv)
34
riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
35
}
36
}
37
+
38
+static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
39
+{
25
+{
40
+ const uint32_t inhibit = riscv_iommu_reg_get32(
26
+ return sm4_sbox[word & 0xff] |
41
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
27
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
42
+ uint64_t overflow_at, overflow_ns;
28
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
43
+
29
+ sm4_sbox[(word >> 24) & 0xff] << 24;
44
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
45
+ return;
46
+ }
47
+
48
+ /*
49
+ * We are using INT64_MAX here instead to UINT64_MAX because cycle counter
50
+ * has 63-bit precision and INT64_MAX is the maximum it can store.
51
+ */
52
+ if (value) {
53
+ overflow_ns = INT64_MAX - value + 1;
54
+ } else {
55
+ overflow_ns = INT64_MAX;
56
+ }
57
+
58
+ overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
59
+
60
+ if (overflow_at > INT64_MAX) {
61
+ s->irq_overflow_left = overflow_at - INT64_MAX;
62
+ overflow_at = INT64_MAX;
63
+ }
64
+
65
+ timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
66
+}
30
+}
67
+
31
+
68
+/* Updates the internal cycle counter state when iocntinh:CY is changed. */
32
#endif
69
+void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
33
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
70
+{
71
+ const uint32_t inhibit = riscv_iommu_reg_get32(
72
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
73
+
74
+ /* We only need to process CY bit toggle. */
75
+ if (!(inhibit ^ prev_cy_inh)) {
76
+ return;
77
+ }
78
+
79
+ if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
80
+ /*
81
+ * Cycle counter is enabled. Just start the timer again and update
82
+ * the clock snapshot value to point to the current time to make
83
+ * sure iohpmcycles read is correct.
84
+ */
85
+ s->hpmcycle_prev = get_cycles();
86
+ hpm_setup_timer(s, s->hpmcycle_val);
87
+ } else {
88
+ /*
89
+ * Cycle counter is disabled. Stop the timer and update the cycle
90
+ * counter to record the current value which is last programmed
91
+ * value + the cycles passed so far.
92
+ */
93
+ s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
94
+ timer_del(s->hpm_timer);
95
+ }
96
+}
97
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
98
index XXXXXXX..XXXXXXX 100644
34
index XXXXXXX..XXXXXXX 100644
99
--- a/hw/riscv/riscv-iommu.c
35
--- a/target/arm/tcg/crypto_helper.c
100
+++ b/hw/riscv/riscv-iommu.c
36
+++ b/target/arm/tcg/crypto_helper.c
101
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
37
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm)
102
riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr);
38
CR_ST_WORD(d, (i + 3) % 4) ^
103
}
39
CR_ST_WORD(n, i);
104
40
105
+static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
41
- t = sm4_sbox[t & 0xff] |
106
+ uint32_t regb,
42
- sm4_sbox[(t >> 8) & 0xff] << 8 |
107
+ bool prev_cy_inh)
43
- sm4_sbox[(t >> 16) & 0xff] << 16 |
108
+{
44
- sm4_sbox[(t >> 24) & 0xff] << 24;
109
+ switch (regb) {
45
+ t = sm4_subword(t);
110
+ case RISCV_IOMMU_REG_IOCOUNTINH:
46
111
+ riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
47
CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
112
+ break;
48
rol32(t, 24);
113
+
49
@@ -XXX,XX +XXX,XX @@ static void do_crypto_sm4ekey(uint64_t *rd, uint64_t *rn, uint64_t *rm)
114
+ case RISCV_IOMMU_REG_IOHPMCYCLES:
50
CR_ST_WORD(d, (i + 3) % 4) ^
115
+ case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
51
CR_ST_WORD(m, i);
116
+ /* not yet implemented */
52
117
+ break;
53
- t = sm4_sbox[t & 0xff] |
118
+
54
- sm4_sbox[(t >> 8) & 0xff] << 8 |
119
+ case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
55
- sm4_sbox[(t >> 16) & 0xff] << 16 |
120
+ RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
56
- sm4_sbox[(t >> 24) & 0xff] << 24;
121
+ /* not yet implemented */
57
+ t = sm4_subword(t);
122
+ break;
58
123
+ }
59
CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
124
+}
125
+
126
/*
127
* Write the resulting value of 'data' for the reg specified
128
* by 'reg_addr', after considering read-only/read-write/write-clear
129
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
130
uint32_t regb = addr & ~3;
131
uint32_t busy = 0;
132
uint64_t val = 0;
133
+ bool cy_inh = false;
134
135
if ((addr & (size - 1)) != 0) {
136
/* Unsupported MMIO alignment or access size */
137
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
138
busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
139
break;
140
141
+ case RISCV_IOMMU_REG_IOCOUNTINH:
142
+ if (addr != RISCV_IOMMU_REG_IOCOUNTINH) {
143
+ break;
144
+ }
145
+ /* Store previous value of CY bit. */
146
+ cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) &
147
+ RISCV_IOMMU_IOCOUNTINH_CY);
148
+ break;
149
+
150
+
151
default:
152
break;
153
}
154
@@ -XXX,XX +XXX,XX @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
155
stl_le_p(&s->regs_rw[regb], rw | busy);
156
}
157
158
+ /* Process HPM writes and update any internal state if needed. */
159
+ if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF &&
160
+ regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) {
161
+ riscv_iommu_process_hpm_writes(s, regb, cy_inh);
162
+ }
163
+
164
if (process_fn) {
165
process_fn(s);
166
}
60
}
167
--
61
--
168
2.48.1
62
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
Expose ziccrse, zabha and svvptc.
3
Adds sm4_ck constant for use in sm4 cryptography across different targets.
4
4
5
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
5
Signed-off-by: Max Chou <max.chou@sifive.com>
6
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
6
Reviewed-by: Frank Chang <frank.chang@sifive.com>
7
Message-ID: <20250221153758.652078-4-dbarboza@ventanamicro.com>
7
Signed-off-by: Max Chou <max.chou@sifive.com>
8
Message-ID: <20230711165917.2629866-15-max.chou@sifive.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
---
10
target/riscv/kvm/kvm-cpu.c | 3 +++
11
include/crypto/sm4.h | 1 +
11
1 file changed, 3 insertions(+)
12
crypto/sm4.c | 10 ++++++++++
13
2 files changed, 11 insertions(+)
12
14
13
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
15
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
14
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/kvm/kvm-cpu.c
17
--- a/include/crypto/sm4.h
16
+++ b/target/riscv/kvm/kvm-cpu.c
18
+++ b/include/crypto/sm4.h
17
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
19
@@ -XXX,XX +XXX,XX @@
18
static KVMCPUConfig kvm_multi_ext_cfgs[] = {
20
#define QEMU_SM4_H
19
KVM_EXT_CFG("zicbom", ext_zicbom, KVM_RISCV_ISA_EXT_ZICBOM),
21
20
KVM_EXT_CFG("zicboz", ext_zicboz, KVM_RISCV_ISA_EXT_ZICBOZ),
22
extern const uint8_t sm4_sbox[256];
21
+ KVM_EXT_CFG("ziccrse", ext_ziccrse, KVM_RISCV_ISA_EXT_ZICCRSE),
23
+extern const uint32_t sm4_ck[32];
22
KVM_EXT_CFG("zicntr", ext_zicntr, KVM_RISCV_ISA_EXT_ZICNTR),
24
23
KVM_EXT_CFG("zicond", ext_zicond, KVM_RISCV_ISA_EXT_ZICOND),
25
static inline uint32_t sm4_subword(uint32_t word)
24
KVM_EXT_CFG("zicsr", ext_zicsr, KVM_RISCV_ISA_EXT_ZICSR),
26
{
25
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
27
diff --git a/crypto/sm4.c b/crypto/sm4.c
26
KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM),
28
index XXXXXXX..XXXXXXX 100644
27
KVM_EXT_CFG("zimop", ext_zimop, KVM_RISCV_ISA_EXT_ZIMOP),
29
--- a/crypto/sm4.c
28
KVM_EXT_CFG("zcmop", ext_zcmop, KVM_RISCV_ISA_EXT_ZCMOP),
30
+++ b/crypto/sm4.c
29
+ KVM_EXT_CFG("zabha", ext_zabha, KVM_RISCV_ISA_EXT_ZABHA),
31
@@ -XXX,XX +XXX,XX @@ uint8_t const sm4_sbox[] = {
30
KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS),
32
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
31
KVM_EXT_CFG("zawrs", ext_zawrs, KVM_RISCV_ISA_EXT_ZAWRS),
32
KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA),
33
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
34
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
35
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
36
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
37
+ KVM_EXT_CFG("svvptc", ext_svvptc, KVM_RISCV_ISA_EXT_SVVPTC),
38
};
33
};
39
34
40
static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
35
+uint32_t const sm4_ck[] = {
36
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
37
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
38
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
39
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
40
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
41
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
42
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
43
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
44
+};
41
--
45
--
42
2.48.1
46
2.41.0
diff view generated by jsdifflib
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
1
From: Max Chou <max.chou@sifive.com>
2
2
3
CTR entries are accessed using ctrsource, ctrtarget and ctrdata
3
This commit adds support for the Zvksed vector-crypto extension, which
4
registers using smcsrind/sscsrind extension. This commits extends
4
consists of the following instructions:
5
the csrind extension to support CTR registers.
5
6
6
* vsm4k.vi
7
ctrsource is accessible through xireg CSR, ctrtarget is accessible
7
* vsm4r.[vv,vs]
8
through xireg1 and ctrdata is accessible through xireg2 CSR.
8
9
9
Translation functions are defined in
10
CTR supports maximum depth of 256 entries which are accessed using
10
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
11
xiselect range 0x200 to 0x2ff.
11
`target/riscv/vcrypto_helper.c`.
12
12
13
This commits also adds properties to enable CTR extension. CTR can be
13
Signed-off-by: Max Chou <max.chou@sifive.com>
14
enabled using smctr=true and ssctr=true now.
14
Reviewed-by: Frank Chang <frank.chang@sifive.com>
15
15
[lawrence.hunter@codethink.co.uk: Moved SM4 functions from
16
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
16
crypto_helper.c to vcrypto_helper.c]
17
Acked-by: Alistair Francis <alistair.francis@wdc.com>
17
[nazar.kazakov@codethink.co.uk: Added alignment checks, refactored code to
18
Message-ID: <20250212-b4-ctr_upstream_v6-v7-1-4e8159ea33bf@rivosinc.com>
18
use macros, and minor style changes]
19
Signed-off-by: Max Chou <max.chou@sifive.com>
20
Message-ID: <20230711165917.2629866-16-max.chou@sifive.com>
19
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
21
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
20
---
22
---
21
target/riscv/cpu.c | 26 ++++++-
23
target/riscv/cpu_cfg.h | 1 +
22
target/riscv/csr.c | 150 ++++++++++++++++++++++++++++++++++++-
24
target/riscv/helper.h | 4 +
23
target/riscv/tcg/tcg-cpu.c | 11 +++
25
target/riscv/insn32.decode | 5 +
24
3 files changed, 185 insertions(+), 2 deletions(-)
26
target/riscv/cpu.c | 5 +-
25
27
target/riscv/vcrypto_helper.c | 127 +++++++++++++++++++++++
28
target/riscv/insn_trans/trans_rvvk.c.inc | 43 ++++++++
29
6 files changed, 184 insertions(+), 1 deletion(-)
30
31
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu_cfg.h
34
+++ b/target/riscv/cpu_cfg.h
35
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
36
bool ext_zvkned;
37
bool ext_zvknha;
38
bool ext_zvknhb;
39
+ bool ext_zvksed;
40
bool ext_zvksh;
41
bool ext_zmmul;
42
bool ext_zvfbfmin;
43
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
44
index XXXXXXX..XXXXXXX 100644
45
--- a/target/riscv/helper.h
46
+++ b/target/riscv/helper.h
47
@@ -XXX,XX +XXX,XX @@ DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
48
49
DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
50
DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
51
+
52
+DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32)
53
+DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32)
54
+DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32)
55
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
56
index XXXXXXX..XXXXXXX 100644
57
--- a/target/riscv/insn32.decode
58
+++ b/target/riscv/insn32.decode
59
@@ -XXX,XX +XXX,XX @@ vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
60
# *** Zvkg vector crypto extension ***
61
vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
62
vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
63
+
64
+# *** Zvksed vector crypto extension ***
65
+vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
66
+vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
67
+vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
26
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
68
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
27
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
28
--- a/target/riscv/cpu.c
70
--- a/target/riscv/cpu.c
29
+++ b/target/riscv/cpu.c
71
+++ b/target/riscv/cpu.c
30
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
72
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
31
ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
73
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
32
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
74
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
33
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
75
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
34
+ ISA_EXT_DATA_ENTRY(smctr, PRIV_VERSION_1_12_0, ext_smctr),
76
+ ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
35
+ ISA_EXT_DATA_ENTRY(ssctr, PRIV_VERSION_1_12_0, ext_ssctr),
77
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
36
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
78
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
37
ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
79
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
38
ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
80
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
39
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
81
* in qemu
40
MULTI_EXT_CFG_BOOL("smcdeleg", ext_smcdeleg, false),
82
*/
41
MULTI_EXT_CFG_BOOL("sscsrind", ext_sscsrind, false),
83
if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
42
MULTI_EXT_CFG_BOOL("ssccfg", ext_ssccfg, false),
84
- cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
43
+ MULTI_EXT_CFG_BOOL("smctr", ext_smctr, false),
85
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
44
+ MULTI_EXT_CFG_BOOL("ssctr", ext_ssctr, false),
86
+ !cpu->cfg.ext_zve32f) {
45
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
87
error_setg(errp,
46
MULTI_EXT_CFG_BOOL("zicfilp", ext_zicfilp, false),
88
"Vector crypto extensions require V or Zve* extensions");
47
MULTI_EXT_CFG_BOOL("zicfiss", ext_zicfiss, false),
89
return;
48
@@ -XXX,XX +XXX,XX @@ static RISCVCPUImpliedExtsRule SSPM_IMPLIED = {
90
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
49
},
91
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
50
};
92
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
51
93
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
52
+static RISCVCPUImpliedExtsRule SMCTR_IMPLIED = {
94
+ DEFINE_PROP_BOOL("x-zvksed", RISCVCPU, cfg.ext_zvksed, false),
53
+ .ext = CPU_CFG_OFFSET(ext_smctr),
95
DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
54
+ .implied_misa_exts = RVS,
96
55
+ .implied_multi_exts = {
97
DEFINE_PROP_END_OF_LIST(),
56
+ CPU_CFG_OFFSET(ext_sscsrind),
98
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
57
+
99
index XXXXXXX..XXXXXXX 100644
58
+ RISCV_IMPLIED_EXTS_RULE_END
100
--- a/target/riscv/vcrypto_helper.c
59
+ },
101
+++ b/target/riscv/vcrypto_helper.c
60
+};
102
@@ -XXX,XX +XXX,XX @@
61
+
103
#include "cpu.h"
62
+static RISCVCPUImpliedExtsRule SSCTR_IMPLIED = {
104
#include "crypto/aes.h"
63
+ .ext = CPU_CFG_OFFSET(ext_ssctr),
105
#include "crypto/aes-round.h"
64
+ .implied_misa_exts = RVS,
106
+#include "crypto/sm4.h"
65
+ .implied_multi_exts = {
107
#include "exec/memop.h"
66
+ CPU_CFG_OFFSET(ext_sscsrind),
108
#include "exec/exec-all.h"
67
+
109
#include "exec/helper-proto.h"
68
+ RISCV_IMPLIED_EXTS_RULE_END
110
@@ -XXX,XX +XXX,XX @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
69
+ },
111
vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
70
+};
112
env->vstart = 0;
71
+
72
RISCVCPUImpliedExtsRule *riscv_misa_ext_implied_rules[] = {
73
&RVA_IMPLIED, &RVD_IMPLIED, &RVF_IMPLIED,
74
&RVM_IMPLIED, &RVV_IMPLIED, NULL
75
@@ -XXX,XX +XXX,XX @@ RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[] = {
76
&ZVFH_IMPLIED, &ZVFHMIN_IMPLIED, &ZVKN_IMPLIED,
77
&ZVKNC_IMPLIED, &ZVKNG_IMPLIED, &ZVKNHB_IMPLIED,
78
&ZVKS_IMPLIED, &ZVKSC_IMPLIED, &ZVKSG_IMPLIED, &SSCFG_IMPLIED,
79
- &SUPM_IMPLIED, &SSPM_IMPLIED,
80
+ &SUPM_IMPLIED, &SSPM_IMPLIED, &SMCTR_IMPLIED, &SSCTR_IMPLIED,
81
NULL
82
};
83
84
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
85
index XXXXXXX..XXXXXXX 100644
86
--- a/target/riscv/csr.c
87
+++ b/target/riscv/csr.c
88
@@ -XXX,XX +XXX,XX @@ static bool xiselect_cd_range(target_ulong isel)
89
return (ISELECT_CD_FIRST <= isel && isel <= ISELECT_CD_LAST);
90
}
113
}
91
114
+
92
+static bool xiselect_ctr_range(int csrno, target_ulong isel)
115
+void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
93
+{
116
+ uint32_t desc)
94
+ /* MIREG-MIREG6 for the range 0x200-0x2ff are not used by CTR. */
117
+{
95
+ return CTR_ENTRIES_FIRST <= isel && isel <= CTR_ENTRIES_LAST &&
118
+ const uint32_t egs = 4;
96
+ csrno < CSR_MIREG;
119
+ uint32_t rnd = uimm5 & 0x7;
97
+}
120
+ uint32_t group_start = env->vstart / egs;
98
+
121
+ uint32_t group_end = env->vl / egs;
99
static int rmw_iprio(target_ulong xlen,
122
+ uint32_t esz = sizeof(uint32_t);
100
target_ulong iselect, uint8_t *iprio,
123
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
101
target_ulong *val, target_ulong new_val,
124
+
102
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
125
+ for (uint32_t i = group_start; i < group_end; ++i) {
103
return 0;
126
+ uint32_t vstart = i * egs;
127
+ uint32_t vend = (i + 1) * egs;
128
+ uint32_t rk[4] = {0};
129
+ uint32_t tmp[8] = {0};
130
+
131
+ for (uint32_t j = vstart; j < vend; ++j) {
132
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
133
+ }
134
+
135
+ for (uint32_t j = 0; j < egs; ++j) {
136
+ tmp[j] = rk[j];
137
+ }
138
+
139
+ for (uint32_t j = 0; j < egs; ++j) {
140
+ uint32_t b, s;
141
+ b = tmp[j + 1] ^ tmp[j + 2] ^ tmp[j + 3] ^ sm4_ck[rnd * 4 + j];
142
+
143
+ s = sm4_subword(b);
144
+
145
+ tmp[j + 4] = tmp[j] ^ (s ^ rol32(s, 13) ^ rol32(s, 23));
146
+ }
147
+
148
+ for (uint32_t j = vstart; j < vend; ++j) {
149
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
150
+ }
151
+ }
152
+
153
+ env->vstart = 0;
154
+ /* set tail elements to 1s */
155
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
156
+}
157
+
158
+static void do_sm4_round(uint32_t *rk, uint32_t *buf)
159
+{
160
+ const uint32_t egs = 4;
161
+ uint32_t s, b;
162
+
163
+ for (uint32_t j = egs; j < egs * 2; ++j) {
164
+ b = buf[j - 3] ^ buf[j - 2] ^ buf[j - 1] ^ rk[j - 4];
165
+
166
+ s = sm4_subword(b);
167
+
168
+ buf[j] = buf[j - 4] ^ (s ^ rol32(s, 2) ^ rol32(s, 10) ^ rol32(s, 18) ^
169
+ rol32(s, 24));
170
+ }
171
+}
172
+
173
+void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
174
+{
175
+ const uint32_t egs = 4;
176
+ uint32_t group_start = env->vstart / egs;
177
+ uint32_t group_end = env->vl / egs;
178
+ uint32_t esz = sizeof(uint32_t);
179
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
180
+
181
+ for (uint32_t i = group_start; i < group_end; ++i) {
182
+ uint32_t vstart = i * egs;
183
+ uint32_t vend = (i + 1) * egs;
184
+ uint32_t rk[4] = {0};
185
+ uint32_t tmp[8] = {0};
186
+
187
+ for (uint32_t j = vstart; j < vend; ++j) {
188
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
189
+ }
190
+
191
+ for (uint32_t j = vstart; j < vend; ++j) {
192
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
193
+ }
194
+
195
+ do_sm4_round(rk, tmp);
196
+
197
+ for (uint32_t j = vstart; j < vend; ++j) {
198
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
199
+ }
200
+ }
201
+
202
+ env->vstart = 0;
203
+ /* set tail elements to 1s */
204
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
205
+}
206
+
207
+void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
208
+{
209
+ const uint32_t egs = 4;
210
+ uint32_t group_start = env->vstart / egs;
211
+ uint32_t group_end = env->vl / egs;
212
+ uint32_t esz = sizeof(uint32_t);
213
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
214
+
215
+ for (uint32_t i = group_start; i < group_end; ++i) {
216
+ uint32_t vstart = i * egs;
217
+ uint32_t vend = (i + 1) * egs;
218
+ uint32_t rk[4] = {0};
219
+ uint32_t tmp[8] = {0};
220
+
221
+ for (uint32_t j = 0; j < egs; ++j) {
222
+ rk[j] = *((uint32_t *)vs2 + H4(j));
223
+ }
224
+
225
+ for (uint32_t j = vstart; j < vend; ++j) {
226
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
227
+ }
228
+
229
+ do_sm4_round(rk, tmp);
230
+
231
+ for (uint32_t j = vstart; j < vend; ++j) {
232
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
233
+ }
234
+ }
235
+
236
+ env->vstart = 0;
237
+ /* set tail elements to 1s */
238
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
239
+}
240
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
241
index XXXXXXX..XXXXXXX 100644
242
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
243
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
244
@@ -XXX,XX +XXX,XX @@ static bool vghsh_check(DisasContext *s, arg_rmrr *a)
104
}
245
}
105
246
106
+static int rmw_ctrsource(CPURISCVState *env, int isel, target_ulong *val,
247
GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
107
+ target_ulong new_val, target_ulong wr_mask)
248
+
108
+{
249
+/*
109
+ /*
250
+ * Zvksed
110
+ * CTR arrays are treated as circular buffers and TOS always points to next
251
+ */
111
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
252
+
112
+ * 0 is always the latest one, traversal is a bit different here. See the
253
+#define ZVKSED_EGS 4
113
+ * below example.
254
+
114
+ *
255
+static bool zvksed_check(DisasContext *s)
115
+ * Depth = 16.
256
+{
116
+ *
257
+ int egw_bytes = ZVKSED_EGS << s->sew;
117
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
258
+ return s->cfg_ptr->ext_zvksed == true &&
118
+ * TOS H
259
+ require_rvv(s) &&
119
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
260
+ vext_check_isa_ill(s) &&
120
+ */
261
+ MAXSZ(s) >= egw_bytes &&
121
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
262
+ s->sew == MO_32;
122
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
263
+}
123
+ uint64_t idx;
264
+
124
+
265
+static bool vsm4k_vi_check(DisasContext *s, arg_rmrr *a)
125
+ /* Entry greater than depth-1 is read-only zero */
266
+{
126
+ if (entry >= depth) {
267
+ return zvksed_check(s) &&
127
+ if (val) {
268
+ require_align(a->rd, s->lmul) &&
128
+ *val = 0;
269
+ require_align(a->rs2, s->lmul);
129
+ }
270
+}
130
+ return 0;
271
+
131
+ }
272
+GEN_VI_UNMASKED_TRANS(vsm4k_vi, vsm4k_vi_check, ZVKSED_EGS)
132
+
273
+
133
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
274
+static bool vsm4r_vv_check(DisasContext *s, arg_rmr *a)
134
+ idx = (idx - entry - 1) & (depth - 1);
275
+{
135
+
276
+ return zvksed_check(s) &&
136
+ if (val) {
277
+ require_align(a->rd, s->lmul) &&
137
+ *val = env->ctr_src[idx];
278
+ require_align(a->rs2, s->lmul);
138
+ }
279
+}
139
+
280
+
140
+ env->ctr_src[idx] = (env->ctr_src[idx] & ~wr_mask) | (new_val & wr_mask);
281
+GEN_V_UNMASKED_TRANS(vsm4r_vv, vsm4r_vv_check, ZVKSED_EGS)
141
+
282
+
142
+ return 0;
283
+static bool vsm4r_vs_check(DisasContext *s, arg_rmr *a)
143
+}
284
+{
144
+
285
+ return zvksed_check(s) &&
145
+static int rmw_ctrtarget(CPURISCVState *env, int isel, target_ulong *val,
286
+ !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
146
+ target_ulong new_val, target_ulong wr_mask)
287
+ require_align(a->rd, s->lmul);
147
+{
288
+}
148
+ /*
289
+
149
+ * CTR arrays are treated as circular buffers and TOS always points to next
290
+GEN_V_UNMASKED_TRANS(vsm4r_vs, vsm4r_vs_check, ZVKSED_EGS)
150
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
151
+ * 0 is always the latest one, traversal is a bit different here. See the
152
+ * below example.
153
+ *
154
+ * Depth = 16.
155
+ *
156
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
157
+ * head H
158
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
159
+ */
160
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
161
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
162
+ uint64_t idx;
163
+
164
+ /* Entry greater than depth-1 is read-only zero */
165
+ if (entry >= depth) {
166
+ if (val) {
167
+ *val = 0;
168
+ }
169
+ return 0;
170
+ }
171
+
172
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
173
+ idx = (idx - entry - 1) & (depth - 1);
174
+
175
+ if (val) {
176
+ *val = env->ctr_dst[idx];
177
+ }
178
+
179
+ env->ctr_dst[idx] = (env->ctr_dst[idx] & ~wr_mask) | (new_val & wr_mask);
180
+
181
+ return 0;
182
+}
183
+
184
+static int rmw_ctrdata(CPURISCVState *env, int isel, target_ulong *val,
185
+ target_ulong new_val, target_ulong wr_mask)
186
+{
187
+ /*
188
+ * CTR arrays are treated as circular buffers and TOS always points to next
189
+ * empty slot, keeping TOS - 1 always pointing to latest entry. Given entry
190
+ * 0 is always the latest one, traversal is a bit different here. See the
191
+ * below example.
192
+ *
193
+ * Depth = 16.
194
+ *
195
+ * idx [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
196
+ * head H
197
+ * entry 6 5 4 3 2 1 0 F E D C B A 9 8 7
198
+ */
199
+ const uint64_t entry = isel - CTR_ENTRIES_FIRST;
200
+ const uint64_t mask = wr_mask & CTRDATA_MASK;
201
+ const uint64_t depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
202
+ uint64_t idx;
203
+
204
+ /* Entry greater than depth-1 is read-only zero */
205
+ if (entry >= depth) {
206
+ if (val) {
207
+ *val = 0;
208
+ }
209
+ return 0;
210
+ }
211
+
212
+ idx = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
213
+ idx = (idx - entry - 1) & (depth - 1);
214
+
215
+ if (val) {
216
+ *val = env->ctr_data[idx];
217
+ }
218
+
219
+ env->ctr_data[idx] = (env->ctr_data[idx] & ~mask) | (new_val & mask);
220
+
221
+ return 0;
222
+}
223
+
224
static RISCVException rmw_xireg_aia(CPURISCVState *env, int csrno,
225
target_ulong isel, target_ulong *val,
226
target_ulong new_val, target_ulong wr_mask)
227
@@ -XXX,XX +XXX,XX @@ done:
228
return ret;
229
}
230
231
+static int rmw_xireg_ctr(CPURISCVState *env, int csrno,
232
+ target_ulong isel, target_ulong *val,
233
+ target_ulong new_val, target_ulong wr_mask)
234
+{
235
+ if (!riscv_cpu_cfg(env)->ext_smctr && !riscv_cpu_cfg(env)->ext_ssctr) {
236
+ return -EINVAL;
237
+ }
238
+
239
+ if (csrno == CSR_SIREG || csrno == CSR_VSIREG) {
240
+ return rmw_ctrsource(env, isel, val, new_val, wr_mask);
241
+ } else if (csrno == CSR_SIREG2 || csrno == CSR_VSIREG2) {
242
+ return rmw_ctrtarget(env, isel, val, new_val, wr_mask);
243
+ } else if (csrno == CSR_SIREG3 || csrno == CSR_VSIREG3) {
244
+ return rmw_ctrdata(env, isel, val, new_val, wr_mask);
245
+ } else if (val) {
246
+ *val = 0;
247
+ }
248
+
249
+ return 0;
250
+}
251
+
252
/*
253
* rmw_xireg_csrind: Perform indirect access to xireg and xireg2-xireg6
254
*
255
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
256
target_ulong isel, target_ulong *val,
257
target_ulong new_val, target_ulong wr_mask)
258
{
259
- int ret = -EINVAL;
260
bool virt = csrno == CSR_VSIREG ? true : false;
261
+ int ret = -EINVAL;
262
263
if (xiselect_cd_range(isel)) {
264
ret = rmw_xireg_cd(env, csrno, isel, val, new_val, wr_mask);
265
+ } else if (xiselect_ctr_range(csrno, isel)) {
266
+ ret = rmw_xireg_ctr(env, csrno, isel, val, new_val, wr_mask);
267
} else {
268
/*
269
* As per the specification, access to unimplented region is undefined
270
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
271
index XXXXXXX..XXXXXXX 100644
272
--- a/target/riscv/tcg/tcg-cpu.c
273
+++ b/target/riscv/tcg/tcg-cpu.c
274
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
275
return;
276
}
277
278
+ if ((cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr) &&
279
+ (!riscv_has_ext(env, RVS) || !cpu->cfg.ext_sscsrind)) {
280
+ if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_smctr)) ||
281
+ cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_ssctr))) {
282
+ error_setg(errp, "Smctr and Ssctr require S-mode and Sscsrind");
283
+ return;
284
+ }
285
+ cpu->cfg.ext_smctr = false;
286
+ cpu->cfg.ext_ssctr = false;
287
+ }
288
+
289
/*
290
* Disable isa extensions based on priv spec after we
291
* validated and set everything we need.
292
--
291
--
293
2.48.1
292
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Rob Bradford <rbradford@rivosinc.com>
2
2
3
Coverity reported a DEADCODE ticket in this function, as follows:
3
These are WARL fields - zero out the bits for unavailable counters and
4
special case the TM bit in mcountinhibit which is hardwired to zero.
5
This patch achieves this by modifying the value written so that any use
6
of the field will see the correctly masked bits.
4
7
5
>>>> CID 1590358: Control flow issues (DEADCODE)
8
Tested by modifying OpenSBI to write max value to these CSRs and upon
6
>>>> Execution cannot reach this statement: "return ret;".
9
subsequent read the appropriate number of bits for number of PMUs is
7
> 380 return ret;
10
enabled and the TM bit is zero in mcountinhibit.
8
> 381 }
9
11
10
The cause is that the 'if (ret != RISCV_EXCP_NONE)' conditional is
12
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
11
duplicated:
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
14
Reviewed-by: Atish Patra <atishp@rivosinc.com>
13
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_AIA);
15
Message-ID: <20230802124906.24197-1-rbradford@rivosinc.com>
14
if (ret != RISCV_EXCP_NONE) {
15
return ret;
16
}
17
18
if (ret != RISCV_EXCP_NONE) {
19
return ret;
20
}
21
22
Remove the duplication to fix the deadcode.
23
24
Resolves: Coverity CID 1590358
25
Fixes: dbcb6e1ccf ("target/riscv: Enable S*stateen bits for AIA")
26
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
27
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
28
Message-ID: <20250121184847.2109128-5-dbarboza@ventanamicro.com>
29
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
---
17
---
31
target/riscv/csr.c | 4 ----
18
target/riscv/csr.c | 11 +++++++++--
32
1 file changed, 4 deletions(-)
19
1 file changed, 9 insertions(+), 2 deletions(-)
33
20
34
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
21
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
35
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
36
--- a/target/riscv/csr.c
23
--- a/target/riscv/csr.c
37
+++ b/target/riscv/csr.c
24
+++ b/target/riscv/csr.c
38
@@ -XXX,XX +XXX,XX @@ static RISCVException aia_smode32(CPURISCVState *env, int csrno)
25
@@ -XXX,XX +XXX,XX @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
39
return ret;
26
{
40
}
27
int cidx;
41
28
PMUCTRState *counter;
42
- if (ret != RISCV_EXCP_NONE) {
29
+ RISCVCPU *cpu = env_archcpu(env);
43
- return ret;
30
44
- }
31
- env->mcountinhibit = val;
45
-
32
+ /* WARL register - disable unavailable counters; TM bit is always 0 */
46
return smode32(env, csrno);
33
+ env->mcountinhibit =
34
+ val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
35
36
/* Check if any other counter is also monitoring cycles/instructions */
37
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
38
@@ -XXX,XX +XXX,XX @@ static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
39
static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
40
target_ulong val)
41
{
42
- env->mcounteren = val;
43
+ RISCVCPU *cpu = env_archcpu(env);
44
+
45
+ /* WARL register - disable unavailable counters */
46
+ env->mcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
47
+ COUNTEREN_IR);
48
return RISCV_EXCP_NONE;
47
}
49
}
48
50
49
--
51
--
50
2.48.1
52
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
At this moment ziccrse is a TCG always enabled named feature for
3
RVA23 Profiles states:
4
priv_ver > 1.11 that has no exclusive flag. In the next patch we'll make
4
The RVA23 profiles are intended to be used for 64-bit application
5
the KVM driver turn ziccrse off if the extension isn't available in the
5
processors that will run rich OS stacks from standard binary OS
6
host, and we'll need an ext_ziccrse flag in the CPU state for that.
6
distributions and with a substantial number of third-party binary user
7
applications that will be supported over a considerable length of time
8
in the field.
7
9
8
Create an exclusive flag for it like we do with other named features.
10
The chapter 4 of the unprivileged spec introduces the Zihintntl extension
9
As with any named features we already have, it won't be exposed to
11
and Zihintntl is a mandatory extension presented in RVA23 Profiles, whose
10
users.
12
purpose is to enable application and operating system portability across
13
different implementations. Thus the DTS should contain the Zihintntl ISA
14
string in order to pass to software.
11
15
12
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
16
The unprivileged spec states:
13
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
17
Like any HINTs, these instructions may be freely ignored. Hence, although
14
Message-ID: <20250221153758.652078-3-dbarboza@ventanamicro.com>
18
they are described in terms of cache-based memory hierarchies, they do not
19
mandate the provision of caches.
20
21
These instructions are encoded with non-used opcode, e.g. ADD x0, x0, x2,
22
which QEMU already supports, and QEMU does not emulate cache. Therefore
23
these instructions can be considered as a no-op, and we only need to add
24
a new property for the Zihintntl extension.
25
26
Reviewed-by: Frank Chang <frank.chang@sifive.com>
27
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
28
Signed-off-by: Jason Chien <jason.chien@sifive.com>
29
Message-ID: <20230726074049.19505-2-jason.chien@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
31
---
17
target/riscv/cpu_cfg.h | 3 +++
32
target/riscv/cpu_cfg.h | 1 +
18
target/riscv/cpu.c | 3 ++-
33
target/riscv/cpu.c | 2 ++
19
target/riscv/tcg/tcg-cpu.c | 2 ++
34
2 files changed, 3 insertions(+)
20
3 files changed, 7 insertions(+), 1 deletion(-)
21
35
22
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
36
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
23
index XXXXXXX..XXXXXXX 100644
37
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/cpu_cfg.h
38
--- a/target/riscv/cpu_cfg.h
25
+++ b/target/riscv/cpu_cfg.h
39
+++ b/target/riscv/cpu_cfg.h
26
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
40
@@ -XXX,XX +XXX,XX @@ struct RISCVCPUConfig {
27
bool has_priv_1_12;
41
bool ext_icbom;
28
bool has_priv_1_11;
42
bool ext_icboz;
29
43
bool ext_zicond;
30
+ /* Always enabled for TCG if has_priv_1_11 */
44
+ bool ext_zihintntl;
31
+ bool ext_ziccrse;
45
bool ext_zihintpause;
32
+
46
bool ext_smstateen;
33
/* Vendor-specific custom extensions */
47
bool ext_sstc;
34
bool ext_xtheadba;
35
bool ext_xtheadbb;
36
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
48
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
37
index XXXXXXX..XXXXXXX 100644
49
index XXXXXXX..XXXXXXX 100644
38
--- a/target/riscv/cpu.c
50
--- a/target/riscv/cpu.c
39
+++ b/target/riscv/cpu.c
51
+++ b/target/riscv/cpu.c
40
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
52
@@ -XXX,XX +XXX,XX @@ static const struct isa_ext_data isa_edata_arr[] = {
41
ISA_EXT_DATA_ENTRY(ziccamoa, PRIV_VERSION_1_11_0, has_priv_1_11),
42
ISA_EXT_DATA_ENTRY(ziccif, PRIV_VERSION_1_11_0, has_priv_1_11),
43
ISA_EXT_DATA_ENTRY(zicclsm, PRIV_VERSION_1_11_0, has_priv_1_11),
44
- ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, has_priv_1_11),
45
+ ISA_EXT_DATA_ENTRY(ziccrse, PRIV_VERSION_1_11_0, ext_ziccrse),
46
ISA_EXT_DATA_ENTRY(zicfilp, PRIV_VERSION_1_12_0, ext_zicfilp),
47
ISA_EXT_DATA_ENTRY(zicfiss, PRIV_VERSION_1_13_0, ext_zicfiss),
48
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
53
ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
49
@@ -XXX,XX +XXX,XX @@ const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = {
54
ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
50
MULTI_EXT_CFG_BOOL("zic64b", ext_zic64b, true),
55
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
51
MULTI_EXT_CFG_BOOL("ssstateen", ext_ssstateen, true),
56
+ ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl),
52
MULTI_EXT_CFG_BOOL("sha", ext_sha, true),
57
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
53
+ MULTI_EXT_CFG_BOOL("ziccrse", ext_ziccrse, true),
58
ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
54
59
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
55
{ },
60
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
56
};
61
DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
57
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
62
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
58
index XXXXXXX..XXXXXXX 100644
63
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
59
--- a/target/riscv/tcg/tcg-cpu.c
64
+ DEFINE_PROP_BOOL("Zihintntl", RISCVCPU, cfg.ext_zihintntl, true),
60
+++ b/target/riscv/tcg/tcg-cpu.c
65
DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
61
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_update_named_features(RISCVCPU *cpu)
66
DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
62
67
DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
63
cpu->cfg.ext_sha = riscv_has_ext(&cpu->env, RVH) &&
64
cpu->cfg.ext_ssstateen;
65
+
66
+ cpu->cfg.ext_ziccrse = cpu->cfg.has_priv_1_11;
67
}
68
69
static void riscv_cpu_validate_g(RISCVCPU *cpu)
70
--
68
--
71
2.48.1
69
2.41.0
diff view generated by jsdifflib
1
From: Quan Zhou <zhouquan@iscas.ac.cn>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
When the Sscofpmf/Svade/Svadu/Smnpm/Ssnpm exts is available
3
Commit a47842d ("riscv: Add support for the Zfa extension") implemented the zfa extension.
4
expose it to the guest so that guest can use it.
4
However, it has some typos for fleq.d and fltq.d. Both of them misused the fltq.s
5
helper function.
5
6
6
Signed-off-by: Quan Zhou <zhouquan@iscas.ac.cn>
7
Fixes: a47842d ("riscv: Add support for the Zfa extension")
8
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Message-ID: <303616ccad2b5309768157b50d93b3e89fecc9cb.1740371468.git.zhouquan@iscas.ac.cn>
10
Reviewed-by: Weiwei Li <liweiwei@iscas.ac.cn>
11
Message-ID: <20230728003906.768-1-zhiwei_liu@linux.alibaba.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
13
---
11
target/riscv/kvm/kvm-cpu.c | 5 +++++
14
target/riscv/insn_trans/trans_rvzfa.c.inc | 4 ++--
12
1 file changed, 5 insertions(+)
15
1 file changed, 2 insertions(+), 2 deletions(-)
13
16
14
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
17
diff --git a/target/riscv/insn_trans/trans_rvzfa.c.inc b/target/riscv/insn_trans/trans_rvzfa.c.inc
15
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
16
--- a/target/riscv/kvm/kvm-cpu.c
19
--- a/target/riscv/insn_trans/trans_rvzfa.c.inc
17
+++ b/target/riscv/kvm/kvm-cpu.c
20
+++ b/target/riscv/insn_trans/trans_rvzfa.c.inc
18
@@ -XXX,XX +XXX,XX @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = {
21
@@ -XXX,XX +XXX,XX @@ bool trans_fleq_d(DisasContext *ctx, arg_fleq_d *a)
19
KVM_EXT_CFG("zvksed", ext_zvksed, KVM_RISCV_ISA_EXT_ZVKSED),
22
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
20
KVM_EXT_CFG("zvksh", ext_zvksh, KVM_RISCV_ISA_EXT_ZVKSH),
23
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
21
KVM_EXT_CFG("zvkt", ext_zvkt, KVM_RISCV_ISA_EXT_ZVKT),
24
22
+ KVM_EXT_CFG("smnpm", ext_smnpm, KVM_RISCV_ISA_EXT_SMNPM),
25
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
23
KVM_EXT_CFG("smstateen", ext_smstateen, KVM_RISCV_ISA_EXT_SMSTATEEN),
26
+ gen_helper_fleq_d(dest, cpu_env, src1, src2);
24
KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
27
gen_set_gpr(ctx, a->rd, dest);
25
+ KVM_EXT_CFG("sscofpmf", ext_sscofpmf, KVM_RISCV_ISA_EXT_SSCOFPMF),
28
return true;
26
+ KVM_EXT_CFG("ssnpm", ext_ssnpm, KVM_RISCV_ISA_EXT_SSNPM),
29
}
27
KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
30
@@ -XXX,XX +XXX,XX @@ bool trans_fltq_d(DisasContext *ctx, arg_fltq_d *a)
28
+ KVM_EXT_CFG("svade", ext_svade, KVM_RISCV_ISA_EXT_SVADE),
31
TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
29
+ KVM_EXT_CFG("svadu", ext_svadu, KVM_RISCV_ISA_EXT_SVADU),
32
TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
30
KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
33
31
KVM_EXT_CFG("svnapot", ext_svnapot, KVM_RISCV_ISA_EXT_SVNAPOT),
34
- gen_helper_fltq_s(dest, cpu_env, src1, src2);
32
KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
35
+ gen_helper_fltq_d(dest, cpu_env, src1, src2);
36
gen_set_gpr(ctx, a->rd, dest);
37
return true;
38
}
33
--
39
--
34
2.48.1
40
2.41.0
diff view generated by jsdifflib
1
From: Jason Chien <jason.chien@sifive.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
The header contains duplicate macro definitions.
3
When writing the upper mtime, we should keep the original lower mtime
4
This commit eliminates the duplicate part.
4
whose value is given by cpu_riscv_read_rtc() instead of
5
cpu_riscv_read_rtc_raw(). The same logic applies to writes to lower mtime.
5
6
6
Signed-off-by: Jason Chien <jason.chien@sifive.com>
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250115141730.30858-2-jason.chien@sifive.com>
9
Message-ID: <20230728082502.26439-1-jason.chien@sifive.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
11
---
13
hw/riscv/riscv-iommu-bits.h | 22 ++++++----------------
12
hw/intc/riscv_aclint.c | 5 +++--
14
1 file changed, 6 insertions(+), 16 deletions(-)
13
1 file changed, 3 insertions(+), 2 deletions(-)
15
14
16
diff --git a/hw/riscv/riscv-iommu-bits.h b/hw/riscv/riscv-iommu-bits.h
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
17
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/riscv/riscv-iommu-bits.h
17
--- a/hw/intc/riscv_aclint.c
19
+++ b/hw/riscv/riscv-iommu-bits.h
18
+++ b/hw/intc/riscv_aclint.c
20
@@ -XXX,XX +XXX,XX @@ struct riscv_iommu_pq_record {
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
21
#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
20
return;
22
#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
21
} else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) {
23
#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
22
uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq);
24
+
23
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
25
/* Payload fields */
24
26
+#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
25
if (addr == mtimer->time_base) {
27
+#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
26
if (size == 4) {
28
+#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
27
/* time_lo for RV32/RV64 */
29
#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
28
- mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r;
30
+#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
29
+ mtimer->time_delta = ((rtc & ~0xFFFFFFFFULL) | value) - rtc_r;
31
+#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
30
} else {
32
31
/* time for RV64 */
33
/* Common field positions */
32
mtimer->time_delta = value - rtc_r;
34
#define RISCV_IOMMU_PPN_FIELD GENMASK_ULL(53, 10)
33
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
35
@@ -XXX,XX +XXX,XX @@ enum riscv_iommu_fq_ttypes {
34
} else {
36
RISCV_IOMMU_FW_TTYPE_PCIE_MSG_REQ = 9,
35
if (size == 4) {
37
};
36
/* time_hi for RV32/RV64 */
38
37
- mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r;
39
-/* Header fields */
38
+ mtimer->time_delta = (value << 32 | (rtc & 0xFFFFFFFF)) - rtc_r;
40
-#define RISCV_IOMMU_PREQ_HDR_PID GENMASK_ULL(31, 12)
39
} else {
41
-#define RISCV_IOMMU_PREQ_HDR_PV BIT_ULL(32)
40
qemu_log_mask(LOG_GUEST_ERROR,
42
-#define RISCV_IOMMU_PREQ_HDR_PRIV BIT_ULL(33)
41
"aclint-mtimer: invalid time_hi write: %08x",
43
-#define RISCV_IOMMU_PREQ_HDR_EXEC BIT_ULL(34)
44
-#define RISCV_IOMMU_PREQ_HDR_DID GENMASK_ULL(63, 40)
45
-
46
-/* Payload fields */
47
-#define RISCV_IOMMU_PREQ_PAYLOAD_R BIT_ULL(0)
48
-#define RISCV_IOMMU_PREQ_PAYLOAD_W BIT_ULL(1)
49
-#define RISCV_IOMMU_PREQ_PAYLOAD_L BIT_ULL(2)
50
-#define RISCV_IOMMU_PREQ_PAYLOAD_M GENMASK_ULL(2, 0)
51
-#define RISCV_IOMMU_PREQ_PRG_INDEX GENMASK_ULL(11, 3)
52
-#define RISCV_IOMMU_PREQ_UADDR GENMASK_ULL(63, 12)
53
-
54
-
55
/*
56
* struct riscv_iommu_msi_pte - MSI Page Table Entry
57
*/
58
--
42
--
59
2.48.1
43
2.41.0
diff view generated by jsdifflib
1
From: Jason Chien <jason.chien@sifive.com>
1
From: Jason Chien <jason.chien@sifive.com>
2
2
3
Initially, the IOMMU would create a thread, but this thread was removed in
3
The variables whose values are given by cpu_riscv_read_rtc() should be named
4
the merged version. The struct members for thread control should have been
4
"rtc". The variables whose value are given by cpu_riscv_read_rtc_raw()
5
removed as well, but they were not removed in commit 0c54acb8243
5
should be named "rtc_r".
6
("hw/riscv: add RISC-V IOMMU base emulation").
7
6
8
Signed-off-by: Jason Chien <jason.chien@sifive.com>
7
Signed-off-by: Jason Chien <jason.chien@sifive.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250115141730.30858-1-jason.chien@sifive.com>
9
Message-ID: <20230728082502.26439-2-jason.chien@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
11
---
15
hw/riscv/riscv-iommu.h | 5 -----
12
hw/intc/riscv_aclint.c | 6 +++---
16
1 file changed, 5 deletions(-)
13
1 file changed, 3 insertions(+), 3 deletions(-)
17
14
18
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
15
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
19
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu.h
17
--- a/hw/intc/riscv_aclint.c
21
+++ b/hw/riscv/riscv-iommu.h
18
+++ b/hw/intc/riscv_aclint.c
22
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
19
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
23
/* interrupt notifier */
20
uint64_t next;
24
void (*notify)(RISCVIOMMUState *iommu, unsigned vector);
21
uint64_t diff;
25
22
26
- /* IOMMU State Machine */
23
- uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
27
- QemuThread core_proc; /* Background processing thread */
24
+ uint64_t rtc = cpu_riscv_read_rtc(mtimer);
28
- QemuCond core_cond; /* Background processing wake up signal */
25
29
- unsigned core_exec; /* Processing thread execution actions */
26
/* Compute the relative hartid w.r.t the socket */
30
-
27
hartid = hartid - mtimer->hartid_base;
31
/* IOMMU target address space */
28
32
AddressSpace *target_as;
29
mtimer->timecmp[hartid] = value;
33
MemoryRegion *target_mr;
30
- if (mtimer->timecmp[hartid] <= rtc_r) {
31
+ if (mtimer->timecmp[hartid] <= rtc) {
32
/*
33
* If we're setting an MTIMECMP value in the "past",
34
* immediately raise the timer interrupt
35
@@ -XXX,XX +XXX,XX @@ static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
36
37
/* otherwise, set up the future timer interrupt */
38
qemu_irq_lower(mtimer->timer_irqs[hartid]);
39
- diff = mtimer->timecmp[hartid] - rtc_r;
40
+ diff = mtimer->timecmp[hartid] - rtc;
41
/* back to ns (note args switched in muldiv64) */
42
uint64_t ns_diff = muldiv64(diff, NANOSECONDS_PER_SECOND, timebase_freq);
43
34
--
44
--
35
2.48.1
45
2.41.0
diff view generated by jsdifflib
1
From: Andrea Bolognani <abologna@redhat.com>
1
From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
2
2
3
Until now, the script has worked under the assumption that a
3
We should not use types dependend on host arch for target_ucontext.
4
host CPU can run binaries targeting any CPU in the same family.
4
This bug is found when run rv32 applications.
5
That's a fair enough assumption when it comes to running i386
6
binaries on x86_64, but it doesn't quite apply in the general
7
case.
8
5
9
For example, while riscv64 CPUs could theoretically run riscv32
6
Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
10
applications natively, in practice there exist few (if any?)
7
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
11
CPUs that implement the necessary silicon; moreover, even if you
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
had one such CPU, your host OS would most likely not have
9
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
13
enabled the necessary kernel bits.
10
Message-ID: <20230811055438.1945-1-zhiwei_liu@linux.alibaba.com>
14
15
This new option gives distro packagers the ability to opt out of
16
the assumption, likely on a per-architecture basis, and make
17
things work out of the box for a larger fraction of their user
18
base.
19
20
As an interesting side effect, this makes it possible to enable
21
execution of 64-bit binaries on 32-bit CPUs of the same family,
22
which is a perfectly valid use case that apparently hadn't been
23
considered until now.
24
25
Link: https://src.fedoraproject.org/rpms/qemu/pull-request/72
26
Thanks: David Abdurachmanov <davidlt@rivosinc.com>
27
Thanks: Daniel P. Berrangé <berrange@redhat.com>
28
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
29
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
30
Reviewed-by: Laurent Vivier <laurent@vivier.eu>
31
Message-ID: <20250127182924.103510-4-abologna@redhat.com>
32
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
33
---
12
---
34
scripts/qemu-binfmt-conf.sh | 19 ++++++++++++++++---
13
linux-user/riscv/signal.c | 4 ++--
35
1 file changed, 16 insertions(+), 3 deletions(-)
14
1 file changed, 2 insertions(+), 2 deletions(-)
36
15
37
diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh
16
diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c
38
index XXXXXXX..XXXXXXX 100755
17
index XXXXXXX..XXXXXXX 100644
39
--- a/scripts/qemu-binfmt-conf.sh
18
--- a/linux-user/riscv/signal.c
40
+++ b/scripts/qemu-binfmt-conf.sh
19
+++ b/linux-user/riscv/signal.c
41
@@ -XXX,XX +XXX,XX @@ Usage: qemu-binfmt-conf.sh [--qemu-path PATH][--debian][--systemd CPU]
20
@@ -XXX,XX +XXX,XX @@ struct target_sigcontext {
42
--persistent: if yes, the interpreter is loaded when binfmt is
21
}; /* cf. riscv-linux:arch/riscv/include/uapi/asm/ptrace.h */
43
configured and remains in memory. All future uses
22
44
are cloned from the open file.
23
struct target_ucontext {
45
+ --ignore-family: if yes, it is assumed that the host CPU (e.g. riscv64)
24
- unsigned long uc_flags;
46
+ can't natively run programs targeting a CPU that is
25
- struct target_ucontext *uc_link;
47
+ part of the same family (e.g. riscv32).
26
+ abi_ulong uc_flags;
48
--preserve-argv0 preserve argv[0]
27
+ abi_ptr uc_link;
49
28
target_stack_t uc_stack;
50
To import templates with update-binfmts, use :
29
target_sigset_t uc_sigmask;
51
@@ -XXX,XX +XXX,XX @@ qemu_set_binfmts() {
30
uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
52
fi
53
54
if [ "$host_family" = "$family" ] ; then
55
- continue
56
+ # When --ignore-family is used, we have to generate rules even
57
+ # for targets that are in the same family as the host CPU. The
58
+ # only exception is of course when the CPU types exactly match
59
+ if [ "$target" = "$host_cpu" ] || [ "$IGNORE_FAMILY" = "no" ] ; then
60
+ continue
61
+ fi
62
fi
63
64
$BINFMT_SET
65
@@ -XXX,XX +XXX,XX @@ CREDENTIAL=no
66
PERSISTENT=no
67
PRESERVE_ARG0=no
68
QEMU_SUFFIX=""
69
+IGNORE_FAMILY=no
70
71
_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
72
-persistent:,preserve-argv0:"
73
-options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
74
+persistent:,preserve-argv0:,ignore-family:"
75
+options=$(getopt -o ds:Q:S:e:hc:p:g:F:i: -l ${_longopts} -- "$@")
76
eval set -- "$options"
77
78
while true ; do
79
@@ -XXX,XX +XXX,XX @@ while true ; do
80
shift
81
PRESERVE_ARG0="$1"
82
;;
83
+ -i|--ignore-family)
84
+ shift
85
+ IGNORE_FAMILY="$1"
86
+ ;;
87
*)
88
break
89
;;
90
--
31
--
91
2.48.1
32
2.41.0
92
33
93
34
diff view generated by jsdifflib
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
The next HPM related changes requires the HPM overflow timer to be
3
In this patch, we create the APLIC and IMSIC FDT helper functions and
4
initialized by the riscv-iommu base emulation.
4
remove M mode AIA devices when using KVM acceleration.
5
5
6
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
6
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
7
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Reviewed-by: Jim Shu <jim.shu@sifive.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Message-ID: <20250224190826.1858473-6-dbarboza@ventanamicro.com>
9
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
10
Message-ID: <20230727102439.22554-2-yongxuan.wang@sifive.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
12
---
12
hw/riscv/riscv-iommu-hpm.h | 1 +
13
hw/riscv/virt.c | 290 +++++++++++++++++++++++-------------------------
13
hw/riscv/riscv-iommu.h | 2 ++
14
1 file changed, 137 insertions(+), 153 deletions(-)
14
hw/riscv/riscv-iommu-hpm.c | 36 ++++++++++++++++++++++++++++++++++++
15
hw/riscv/riscv-iommu.c | 3 +++
16
4 files changed, 42 insertions(+)
17
15
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
16
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
19
index XXXXXXX..XXXXXXX 100644
17
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu-hpm.h
18
--- a/hw/riscv/virt.c
21
+++ b/hw/riscv/riscv-iommu-hpm.h
19
+++ b/hw/riscv/virt.c
22
@@ -XXX,XX +XXX,XX @@
20
@@ -XXX,XX +XXX,XX @@ static uint32_t imsic_num_bits(uint32_t count)
23
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
21
return ret;
24
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
25
unsigned event_id);
26
+void riscv_iommu_hpm_timer_cb(void *priv);
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu.h b/hw/riscv/riscv-iommu.h
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu.h
32
+++ b/hw/riscv/riscv-iommu.h
33
@@ -XXX,XX +XXX,XX @@ struct RISCVIOMMUState {
34
QLIST_HEAD(, RISCVIOMMUSpace) spaces;
35
36
/* HPM cycle counter */
37
+ QEMUTimer *hpm_timer;
38
uint64_t hpmcycle_val; /* Current value of cycle register */
39
uint64_t hpmcycle_prev; /* Saved value of QEMU_CLOCK_VIRTUAL clock */
40
+ uint64_t irq_overflow_left; /* Value beyond INT64_MAX after overflow */
41
42
/* HPM event counters */
43
GHashTable *hpm_event_ctr_map; /* Mapping of events to counters */
44
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
45
index XXXXXXX..XXXXXXX 100644
46
--- a/hw/riscv/riscv-iommu-hpm.c
47
+++ b/hw/riscv/riscv-iommu-hpm.c
48
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
49
hpm_incr_ctr(s, ctr_idx);
50
}
51
}
22
}
52
+
23
53
+/* Timer callback for cycle counter overflow. */
24
-static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
54
+void riscv_iommu_hpm_timer_cb(void *priv)
25
- uint32_t *phandle, uint32_t *intc_phandles,
26
- uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
27
+static void create_fdt_one_imsic(RISCVVirtState *s, hwaddr base_addr,
28
+ uint32_t *intc_phandles, uint32_t msi_phandle,
29
+ bool m_mode, uint32_t imsic_guest_bits)
30
{
31
int cpu, socket;
32
char *imsic_name;
33
MachineState *ms = MACHINE(s);
34
int socket_count = riscv_socket_count(ms);
35
- uint32_t imsic_max_hart_per_socket, imsic_guest_bits;
36
+ uint32_t imsic_max_hart_per_socket;
37
uint32_t *imsic_cells, *imsic_regs, imsic_addr, imsic_size;
38
39
- *msi_m_phandle = (*phandle)++;
40
- *msi_s_phandle = (*phandle)++;
41
imsic_cells = g_new0(uint32_t, ms->smp.cpus * 2);
42
imsic_regs = g_new0(uint32_t, socket_count * 4);
43
44
- /* M-level IMSIC node */
45
for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
46
imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
47
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
48
+ imsic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
49
}
50
- imsic_max_hart_per_socket = 0;
51
- for (socket = 0; socket < socket_count; socket++) {
52
- imsic_addr = memmap[VIRT_IMSIC_M].base +
53
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
54
- imsic_size = IMSIC_HART_SIZE(0) * s->soc[socket].num_harts;
55
- imsic_regs[socket * 4 + 0] = 0;
56
- imsic_regs[socket * 4 + 1] = cpu_to_be32(imsic_addr);
57
- imsic_regs[socket * 4 + 2] = 0;
58
- imsic_regs[socket * 4 + 3] = cpu_to_be32(imsic_size);
59
- if (imsic_max_hart_per_socket < s->soc[socket].num_harts) {
60
- imsic_max_hart_per_socket = s->soc[socket].num_harts;
61
- }
62
- }
63
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
64
- (unsigned long)memmap[VIRT_IMSIC_M].base);
65
- qemu_fdt_add_subnode(ms->fdt, imsic_name);
66
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
67
- "riscv,imsics");
68
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
69
- FDT_IMSIC_INT_CELLS);
70
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
71
- NULL, 0);
72
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
73
- NULL, 0);
74
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
75
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
76
- qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
77
- socket_count * sizeof(uint32_t) * 4);
78
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
79
- VIRT_IRQCHIP_NUM_MSIS);
80
- if (socket_count > 1) {
81
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
82
- imsic_num_bits(imsic_max_hart_per_socket));
83
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
84
- imsic_num_bits(socket_count));
85
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
86
- IMSIC_MMIO_GROUP_MIN_SHIFT);
87
- }
88
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_m_phandle);
89
-
90
- g_free(imsic_name);
91
92
- /* S-level IMSIC node */
93
- for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
94
- imsic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
95
- imsic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
96
- }
97
- imsic_guest_bits = imsic_num_bits(s->aia_guests + 1);
98
imsic_max_hart_per_socket = 0;
99
for (socket = 0; socket < socket_count; socket++) {
100
- imsic_addr = memmap[VIRT_IMSIC_S].base +
101
- socket * VIRT_IMSIC_GROUP_MAX_SIZE;
102
+ imsic_addr = base_addr + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
103
imsic_size = IMSIC_HART_SIZE(imsic_guest_bits) *
104
s->soc[socket].num_harts;
105
imsic_regs[socket * 4 + 0] = 0;
106
@@ -XXX,XX +XXX,XX @@ static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
107
imsic_max_hart_per_socket = s->soc[socket].num_harts;
108
}
109
}
110
- imsic_name = g_strdup_printf("/soc/imsics@%lx",
111
- (unsigned long)memmap[VIRT_IMSIC_S].base);
112
+
113
+ imsic_name = g_strdup_printf("/soc/imsics@%lx", (unsigned long)base_addr);
114
qemu_fdt_add_subnode(ms->fdt, imsic_name);
115
- qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible",
116
- "riscv,imsics");
117
+ qemu_fdt_setprop_string(ms->fdt, imsic_name, "compatible", "riscv,imsics");
118
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "#interrupt-cells",
119
- FDT_IMSIC_INT_CELLS);
120
- qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller",
121
- NULL, 0);
122
- qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller",
123
- NULL, 0);
124
+ FDT_IMSIC_INT_CELLS);
125
+ qemu_fdt_setprop(ms->fdt, imsic_name, "interrupt-controller", NULL, 0);
126
+ qemu_fdt_setprop(ms->fdt, imsic_name, "msi-controller", NULL, 0);
127
qemu_fdt_setprop(ms->fdt, imsic_name, "interrupts-extended",
128
- imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
129
+ imsic_cells, ms->smp.cpus * sizeof(uint32_t) * 2);
130
qemu_fdt_setprop(ms->fdt, imsic_name, "reg", imsic_regs,
131
- socket_count * sizeof(uint32_t) * 4);
132
+ socket_count * sizeof(uint32_t) * 4);
133
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,num-ids",
134
- VIRT_IRQCHIP_NUM_MSIS);
135
+ VIRT_IRQCHIP_NUM_MSIS);
136
+
137
if (imsic_guest_bits) {
138
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,guest-index-bits",
139
- imsic_guest_bits);
140
+ imsic_guest_bits);
141
}
142
+
143
if (socket_count > 1) {
144
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,hart-index-bits",
145
- imsic_num_bits(imsic_max_hart_per_socket));
146
+ imsic_num_bits(imsic_max_hart_per_socket));
147
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-bits",
148
- imsic_num_bits(socket_count));
149
+ imsic_num_bits(socket_count));
150
qemu_fdt_setprop_cell(ms->fdt, imsic_name, "riscv,group-index-shift",
151
- IMSIC_MMIO_GROUP_MIN_SHIFT);
152
+ IMSIC_MMIO_GROUP_MIN_SHIFT);
153
}
154
- qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", *msi_s_phandle);
155
- g_free(imsic_name);
156
+ qemu_fdt_setprop_cell(ms->fdt, imsic_name, "phandle", msi_phandle);
157
158
+ g_free(imsic_name);
159
g_free(imsic_regs);
160
g_free(imsic_cells);
161
}
162
163
-static void create_fdt_socket_aplic(RISCVVirtState *s,
164
- const MemMapEntry *memmap, int socket,
165
- uint32_t msi_m_phandle,
166
- uint32_t msi_s_phandle,
167
- uint32_t *phandle,
168
- uint32_t *intc_phandles,
169
- uint32_t *aplic_phandles)
170
+static void create_fdt_imsic(RISCVVirtState *s, const MemMapEntry *memmap,
171
+ uint32_t *phandle, uint32_t *intc_phandles,
172
+ uint32_t *msi_m_phandle, uint32_t *msi_s_phandle)
55
+{
173
+{
56
+ RISCVIOMMUState *s = priv;
174
+ *msi_m_phandle = (*phandle)++;
57
+ const uint32_t inhibit = riscv_iommu_reg_get32(
175
+ *msi_s_phandle = (*phandle)++;
58
+ s, RISCV_IOMMU_REG_IOCOUNTINH);
176
+
59
+ uint32_t ovf;
177
+ if (!kvm_enabled()) {
60
+
178
+ /* M-level IMSIC node */
61
+ if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
179
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_M].base, intc_phandles,
62
+ return;
180
+ *msi_m_phandle, true, 0);
63
+ }
181
+ }
64
+
182
+
65
+ if (s->irq_overflow_left > 0) {
183
+ /* S-level IMSIC node */
66
+ uint64_t irq_trigger_at =
184
+ create_fdt_one_imsic(s, memmap[VIRT_IMSIC_S].base, intc_phandles,
67
+ qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
185
+ *msi_s_phandle, false,
68
+ timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
186
+ imsic_num_bits(s->aia_guests + 1));
69
+ s->irq_overflow_left = 0;
187
+
70
+ return;
188
+}
189
+
190
+static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
191
+ unsigned long aplic_addr, uint32_t aplic_size,
192
+ uint32_t msi_phandle,
193
+ uint32_t *intc_phandles,
194
+ uint32_t aplic_phandle,
195
+ uint32_t aplic_child_phandle,
196
+ bool m_mode)
197
{
198
int cpu;
199
char *aplic_name;
200
uint32_t *aplic_cells;
201
- unsigned long aplic_addr;
202
MachineState *ms = MACHINE(s);
203
- uint32_t aplic_m_phandle, aplic_s_phandle;
204
205
- aplic_m_phandle = (*phandle)++;
206
- aplic_s_phandle = (*phandle)++;
207
aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
208
209
- /* M-level APLIC node */
210
for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
211
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
212
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_M_EXT);
213
+ aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
214
}
215
- aplic_addr = memmap[VIRT_APLIC_M].base +
216
- (memmap[VIRT_APLIC_M].size * socket);
217
+
218
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
219
qemu_fdt_add_subnode(ms->fdt, aplic_name);
220
qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
221
qemu_fdt_setprop_cell(ms->fdt, aplic_name,
222
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
223
+ "#interrupt-cells", FDT_APLIC_INT_CELLS);
224
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
225
+
226
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
227
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
228
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
229
+ aplic_cells,
230
+ s->soc[socket].num_harts * sizeof(uint32_t) * 2);
231
} else {
232
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
233
- msi_m_phandle);
234
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
235
}
236
+
237
qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
238
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_M].size);
239
+ 0x0, aplic_addr, 0x0, aplic_size);
240
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
241
- VIRT_IRQCHIP_NUM_SOURCES);
242
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
243
- aplic_s_phandle);
244
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
245
- aplic_s_phandle, 0x1, VIRT_IRQCHIP_NUM_SOURCES);
246
+ VIRT_IRQCHIP_NUM_SOURCES);
247
+
248
+ if (aplic_child_phandle) {
249
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,children",
250
+ aplic_child_phandle);
251
+ qemu_fdt_setprop_cells(ms->fdt, aplic_name, "riscv,delegate",
252
+ aplic_child_phandle, 0x1,
253
+ VIRT_IRQCHIP_NUM_SOURCES);
71
+ }
254
+ }
72
+
255
+
73
+ ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
256
riscv_socket_fdt_write_id(ms, aplic_name, socket);
74
+ if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
257
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_m_phandle);
75
+ /*
258
+ qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_phandle);
76
+ * We don't need to set hpmcycle_val to zero and update hpmcycle_prev to
259
+
77
+ * current clock value. The way we calculate iohpmcycs will overflow
260
g_free(aplic_name);
78
+ * and return the correct value. This avoids the need to synchronize
261
+ g_free(aplic_cells);
79
+ * timer callback and write callback.
80
+ */
81
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
82
+ RISCV_IOMMU_IOCOUNTOVF_CY, 0);
83
+ riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
84
+ RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
85
+ riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
86
+ }
87
+}
262
+}
88
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
263
89
index XXXXXXX..XXXXXXX 100644
264
- /* S-level APLIC node */
90
--- a/hw/riscv/riscv-iommu.c
265
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
91
+++ b/hw/riscv/riscv-iommu.c
266
- aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
92
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_realize(DeviceState *dev, Error **errp)
267
- aplic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT);
93
address_space_init(&s->trap_as, &s->trap_mr, "riscv-iommu-trap-as");
268
+static void create_fdt_socket_aplic(RISCVVirtState *s,
94
269
+ const MemMapEntry *memmap, int socket,
95
if (s->cap & RISCV_IOMMU_CAP_HPM) {
270
+ uint32_t msi_m_phandle,
96
+ s->hpm_timer =
271
+ uint32_t msi_s_phandle,
97
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, riscv_iommu_hpm_timer_cb, s);
272
+ uint32_t *phandle,
98
s->hpm_event_ctr_map = g_hash_table_new(g_direct_hash, g_direct_equal);
273
+ uint32_t *intc_phandles,
99
}
274
+ uint32_t *aplic_phandles)
275
+{
276
+ char *aplic_name;
277
+ unsigned long aplic_addr;
278
+ MachineState *ms = MACHINE(s);
279
+ uint32_t aplic_m_phandle, aplic_s_phandle;
280
+
281
+ aplic_m_phandle = (*phandle)++;
282
+ aplic_s_phandle = (*phandle)++;
283
+
284
+ if (!kvm_enabled()) {
285
+ /* M-level APLIC node */
286
+ aplic_addr = memmap[VIRT_APLIC_M].base +
287
+ (memmap[VIRT_APLIC_M].size * socket);
288
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
289
+ msi_m_phandle, intc_phandles,
290
+ aplic_m_phandle, aplic_s_phandle,
291
+ true);
292
}
293
+
294
+ /* S-level APLIC node */
295
aplic_addr = memmap[VIRT_APLIC_S].base +
296
(memmap[VIRT_APLIC_S].size * socket);
297
+ create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
298
+ msi_s_phandle, intc_phandles,
299
+ aplic_s_phandle, 0,
300
+ false);
301
+
302
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
303
- qemu_fdt_add_subnode(ms->fdt, aplic_name);
304
- qemu_fdt_setprop_string(ms->fdt, aplic_name, "compatible", "riscv,aplic");
305
- qemu_fdt_setprop_cell(ms->fdt, aplic_name,
306
- "#interrupt-cells", FDT_APLIC_INT_CELLS);
307
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupt-controller", NULL, 0);
308
- if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
309
- qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
310
- aplic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 2);
311
- } else {
312
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent",
313
- msi_s_phandle);
314
- }
315
- qemu_fdt_setprop_cells(ms->fdt, aplic_name, "reg",
316
- 0x0, aplic_addr, 0x0, memmap[VIRT_APLIC_S].size);
317
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "riscv,num-sources",
318
- VIRT_IRQCHIP_NUM_SOURCES);
319
- riscv_socket_fdt_write_id(ms, aplic_name, socket);
320
- qemu_fdt_setprop_cell(ms->fdt, aplic_name, "phandle", aplic_s_phandle);
321
322
if (!socket) {
323
platform_bus_add_all_fdt_nodes(ms->fdt, aplic_name,
324
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
325
326
g_free(aplic_name);
327
328
- g_free(aplic_cells);
329
aplic_phandles[socket] = aplic_s_phandle;
100
}
330
}
101
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_unrealize(DeviceState *dev)
331
102
332
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
103
if (s->cap & RISCV_IOMMU_CAP_HPM) {
333
int i;
104
g_hash_table_unref(s->hpm_event_ctr_map);
334
hwaddr addr;
105
+ timer_free(s->hpm_timer);
335
uint32_t guest_bits;
106
}
336
- DeviceState *aplic_m;
337
- bool msimode = (aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) ? true : false;
338
+ DeviceState *aplic_s = NULL;
339
+ DeviceState *aplic_m = NULL;
340
+ bool msimode = aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
341
342
if (msimode) {
343
- /* Per-socket M-level IMSICs */
344
- addr = memmap[VIRT_IMSIC_M].base + socket * VIRT_IMSIC_GROUP_MAX_SIZE;
345
- for (i = 0; i < hart_count; i++) {
346
- riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
347
- base_hartid + i, true, 1,
348
- VIRT_IRQCHIP_NUM_MSIS);
349
+ if (!kvm_enabled()) {
350
+ /* Per-socket M-level IMSICs */
351
+ addr = memmap[VIRT_IMSIC_M].base +
352
+ socket * VIRT_IMSIC_GROUP_MAX_SIZE;
353
+ for (i = 0; i < hart_count; i++) {
354
+ riscv_imsic_create(addr + i * IMSIC_HART_SIZE(0),
355
+ base_hartid + i, true, 1,
356
+ VIRT_IRQCHIP_NUM_MSIS);
357
+ }
358
}
359
360
/* Per-socket S-level IMSICs */
361
@@ -XXX,XX +XXX,XX @@ static DeviceState *virt_create_aia(RISCVVirtAIAType aia_type, int aia_guests,
362
}
363
}
364
365
- /* Per-socket M-level APLIC */
366
- aplic_m = riscv_aplic_create(
367
- memmap[VIRT_APLIC_M].base + socket * memmap[VIRT_APLIC_M].size,
368
- memmap[VIRT_APLIC_M].size,
369
- (msimode) ? 0 : base_hartid,
370
- (msimode) ? 0 : hart_count,
371
- VIRT_IRQCHIP_NUM_SOURCES,
372
- VIRT_IRQCHIP_NUM_PRIO_BITS,
373
- msimode, true, NULL);
374
-
375
- if (aplic_m) {
376
- /* Per-socket S-level APLIC */
377
- riscv_aplic_create(
378
- memmap[VIRT_APLIC_S].base + socket * memmap[VIRT_APLIC_S].size,
379
- memmap[VIRT_APLIC_S].size,
380
- (msimode) ? 0 : base_hartid,
381
- (msimode) ? 0 : hart_count,
382
- VIRT_IRQCHIP_NUM_SOURCES,
383
- VIRT_IRQCHIP_NUM_PRIO_BITS,
384
- msimode, false, aplic_m);
385
+ if (!kvm_enabled()) {
386
+ /* Per-socket M-level APLIC */
387
+ aplic_m = riscv_aplic_create(memmap[VIRT_APLIC_M].base +
388
+ socket * memmap[VIRT_APLIC_M].size,
389
+ memmap[VIRT_APLIC_M].size,
390
+ (msimode) ? 0 : base_hartid,
391
+ (msimode) ? 0 : hart_count,
392
+ VIRT_IRQCHIP_NUM_SOURCES,
393
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
394
+ msimode, true, NULL);
395
}
396
397
- return aplic_m;
398
+ /* Per-socket S-level APLIC */
399
+ aplic_s = riscv_aplic_create(memmap[VIRT_APLIC_S].base +
400
+ socket * memmap[VIRT_APLIC_S].size,
401
+ memmap[VIRT_APLIC_S].size,
402
+ (msimode) ? 0 : base_hartid,
403
+ (msimode) ? 0 : hart_count,
404
+ VIRT_IRQCHIP_NUM_SOURCES,
405
+ VIRT_IRQCHIP_NUM_PRIO_BITS,
406
+ msimode, false, aplic_m);
407
+
408
+ return kvm_enabled() ? aplic_s : aplic_m;
107
}
409
}
108
410
411
static void create_platform_bus(RISCVVirtState *s, DeviceState *irqchip)
109
--
412
--
110
2.48.1
413
2.41.0
diff view generated by jsdifflib
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
When the IMSIC is emulated in the kernel, the GPIO output lines to CPUs
3
We check the in-kernel irqchip support when using KVM acceleration.
4
and aia_ireg_rmw_fn setting can be remove. In this case the IMSIC
5
trigger CPU interrupts by KVM APIs, and the RMW of IREG is handled in
6
kernel.
7
8
This patch also move the code that claim the CPU interrupts to the
9
beginning of IMSIC realization. This can avoid the unnecessary resource
10
allocation before checking failed.
11
4
12
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
5
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
6
Reviewed-by: Jim Shu <jim.shu@sifive.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250224025722.3999-2-yongxuan.wang@sifive.com>
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
9
Message-ID: <20230727102439.22554-3-yongxuan.wang@sifive.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
11
---
17
hw/intc/riscv_imsic.c | 47 ++++++++++++++++++++++++-------------------
12
target/riscv/kvm.c | 10 +++++++++-
18
1 file changed, 26 insertions(+), 21 deletions(-)
13
1 file changed, 9 insertions(+), 1 deletion(-)
19
14
20
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
15
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
21
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
22
--- a/hw/intc/riscv_imsic.c
17
--- a/target/riscv/kvm.c
23
+++ b/hw/intc/riscv_imsic.c
18
+++ b/target/riscv/kvm.c
24
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
19
@@ -XXX,XX +XXX,XX @@ int kvm_arch_init(MachineState *ms, KVMState *s)
25
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
20
26
CPURISCVState *env = cpu ? cpu_env(cpu) : NULL;
21
int kvm_arch_irqchip_create(KVMState *s)
27
22
{
28
+ /* Claim the CPU interrupt to be triggered by this IMSIC */
23
- return 0;
29
+ if (riscv_cpu_claim_interrupts(rcpu,
24
+ if (kvm_kernel_irqchip_split()) {
30
+ (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
25
+ error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
31
+ error_setg(errp, "%s already claimed",
26
+ exit(1);
32
+ (imsic->mmode) ? "MEIP" : "SEIP");
33
+ return;
34
+ }
27
+ }
35
+
28
+
36
if (!kvm_irqchip_in_kernel()) {
29
+ /*
37
+ /* Create output IRQ lines */
30
+ * We can create the VAIA using the newer device control API.
38
+ imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
31
+ */
39
+ qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
32
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
40
+
33
}
41
imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
34
42
imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
35
int kvm_arch_process_async_events(CPUState *cs)
43
imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
44
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
45
IMSIC_MMIO_SIZE(imsic->num_pages));
46
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &imsic->mmio);
47
48
- /* Claim the CPU interrupt to be triggered by this IMSIC */
49
- if (riscv_cpu_claim_interrupts(rcpu,
50
- (imsic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
51
- error_setg(errp, "%s already claimed",
52
- (imsic->mmode) ? "MEIP" : "SEIP");
53
- return;
54
- }
55
-
56
- /* Create output IRQ lines */
57
- imsic->external_irqs = g_malloc(sizeof(qemu_irq) * imsic->num_pages);
58
- qdev_init_gpio_out(dev, imsic->external_irqs, imsic->num_pages);
59
-
60
/* Force select AIA feature and setup CSR read-modify-write callback */
61
if (env) {
62
if (!imsic->mmode) {
63
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
64
} else {
65
rcpu->cfg.ext_smaia = true;
66
}
67
- riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
68
- riscv_imsic_rmw, imsic);
69
+
70
+ if (!kvm_irqchip_in_kernel()) {
71
+ riscv_cpu_set_aia_ireg_rmw_fn(env, (imsic->mmode) ? PRV_M : PRV_S,
72
+ riscv_imsic_rmw, imsic);
73
+ }
74
}
75
76
msi_nonbroken = true;
77
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_imsic_create(hwaddr addr, uint32_t hartid, bool mmode,
78
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
79
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
80
81
- for (i = 0; i < num_pages; i++) {
82
- if (!i) {
83
- qdev_connect_gpio_out_named(dev, NULL, i,
84
- qdev_get_gpio_in(DEVICE(cpu),
85
+ if (!kvm_irqchip_in_kernel()) {
86
+ for (i = 0; i < num_pages; i++) {
87
+ if (!i) {
88
+ qdev_connect_gpio_out_named(dev, NULL, i,
89
+ qdev_get_gpio_in(DEVICE(cpu),
90
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
91
- } else {
92
- qdev_connect_gpio_out_named(dev, NULL, i,
93
- qdev_get_gpio_in(DEVICE(cpu),
94
+ } else {
95
+ qdev_connect_gpio_out_named(dev, NULL, i,
96
+ qdev_get_gpio_in(DEVICE(cpu),
97
IRQ_LOCAL_MAX + i - 1));
98
+ }
99
}
100
}
101
102
--
36
--
103
2.48.1
37
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
We're missing scounteren and senvcfg CSRs, both already present in the
3
We create a vAIA chip by using the KVM_DEV_TYPE_RISCV_AIA and then set up
4
KVM UAPI.
4
the chip with the KVM_DEV_RISCV_AIA_GRP_* APIs.
5
We also extend KVM accelerator to specify the KVM AIA mode. The "riscv-aia"
6
parameter is passed along with --accel in QEMU command-line.
7
1) "riscv-aia=emul": IMSIC is emulated by hypervisor
8
2) "riscv-aia=hwaccel": use hardware guest IMSIC
9
3) "riscv-aia=auto": use the hardware guest IMSICs whenever available
10
otherwise we fallback to software emulation.
5
11
6
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
13
Reviewed-by: Jim Shu <jim.shu@sifive.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
15
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
8
Acked-by: Alistair Francis <alistair.francis@wdc.com>
16
Message-ID: <20230727102439.22554-4-yongxuan.wang@sifive.com>
9
Message-ID: <20250224123120.1644186-4-dbarboza@ventanamicro.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
18
---
12
target/riscv/kvm/kvm-cpu.c | 6 ++++++
19
target/riscv/kvm_riscv.h | 4 +
13
1 file changed, 6 insertions(+)
20
target/riscv/kvm.c | 186 +++++++++++++++++++++++++++++++++++++++
21
2 files changed, 190 insertions(+)
14
22
15
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
23
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
16
index XXXXXXX..XXXXXXX 100644
24
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/kvm/kvm-cpu.c
25
--- a/target/riscv/kvm_riscv.h
18
+++ b/target/riscv/kvm/kvm-cpu.c
26
+++ b/target/riscv/kvm_riscv.h
19
@@ -XXX,XX +XXX,XX @@ static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
27
@@ -XXX,XX +XXX,XX @@
20
env->stval = 0;
28
void kvm_riscv_init_user_properties(Object *cpu_obj);
21
env->mip = 0;
29
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
22
env->satp = 0;
30
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
23
+ env->scounteren = 0;
31
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
24
+ env->senvcfg = 0;
32
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
33
+ uint64_t aplic_base, uint64_t imsic_base,
34
+ uint64_t guest_num);
35
36
#endif
37
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
38
index XXXXXXX..XXXXXXX 100644
39
--- a/target/riscv/kvm.c
40
+++ b/target/riscv/kvm.c
41
@@ -XXX,XX +XXX,XX @@
42
#include "exec/address-spaces.h"
43
#include "hw/boards.h"
44
#include "hw/irq.h"
45
+#include "hw/intc/riscv_imsic.h"
46
#include "qemu/log.h"
47
#include "hw/loader.h"
48
#include "kvm_riscv.h"
49
@@ -XXX,XX +XXX,XX @@
50
#include "chardev/char-fe.h"
51
#include "migration/migration.h"
52
#include "sysemu/runstate.h"
53
+#include "hw/riscv/numa.h"
54
55
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
56
uint64_t idx)
57
@@ -XXX,XX +XXX,XX @@ bool kvm_arch_cpu_check_are_resettable(void)
58
return true;
25
}
59
}
26
60
27
static int kvm_riscv_get_regs_csr(CPUState *cs)
61
+static int aia_mode;
28
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_get_regs_csr(CPUState *cs)
62
+
29
KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
63
+static const char *kvm_aia_mode_str(uint64_t mode)
30
KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
64
+{
31
KVM_RISCV_GET_CSR(cs, env, satp, env->satp);
65
+ switch (mode) {
32
+ KVM_RISCV_GET_CSR(cs, env, scounteren, env->scounteren);
66
+ case KVM_DEV_RISCV_AIA_MODE_EMUL:
33
+ KVM_RISCV_GET_CSR(cs, env, senvcfg, env->senvcfg);
67
+ return "emul";
34
68
+ case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
35
return 0;
69
+ return "hwaccel";
36
}
70
+ case KVM_DEV_RISCV_AIA_MODE_AUTO:
37
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_put_regs_csr(CPUState *cs)
71
+ default:
38
KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
72
+ return "auto";
39
KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
73
+ };
40
KVM_RISCV_SET_CSR(cs, env, satp, env->satp);
74
+}
41
+ KVM_RISCV_SET_CSR(cs, env, scounteren, env->scounteren);
75
+
42
+ KVM_RISCV_SET_CSR(cs, env, senvcfg, env->senvcfg);
76
+static char *riscv_get_kvm_aia(Object *obj, Error **errp)
43
77
+{
44
return 0;
78
+ return g_strdup(kvm_aia_mode_str(aia_mode));
79
+}
80
+
81
+static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
82
+{
83
+ if (!strcmp(val, "emul")) {
84
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
85
+ } else if (!strcmp(val, "hwaccel")) {
86
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
87
+ } else if (!strcmp(val, "auto")) {
88
+ aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
89
+ } else {
90
+ error_setg(errp, "Invalid KVM AIA mode");
91
+ error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
92
+ }
93
+}
94
+
95
void kvm_arch_accel_class_init(ObjectClass *oc)
96
{
97
+ object_class_property_add_str(oc, "riscv-aia", riscv_get_kvm_aia,
98
+ riscv_set_kvm_aia);
99
+ object_class_property_set_description(oc, "riscv-aia",
100
+ "Set KVM AIA mode. Valid values are "
101
+ "emul, hwaccel, and auto. Default "
102
+ "is auto.");
103
+ object_property_set_default_str(object_class_property_find(oc, "riscv-aia"),
104
+ "auto");
105
+}
106
+
107
+void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
108
+ uint64_t aia_irq_num, uint64_t aia_msi_num,
109
+ uint64_t aplic_base, uint64_t imsic_base,
110
+ uint64_t guest_num)
111
+{
112
+ int ret, i;
113
+ int aia_fd = -1;
114
+ uint64_t default_aia_mode;
115
+ uint64_t socket_count = riscv_socket_count(machine);
116
+ uint64_t max_hart_per_socket = 0;
117
+ uint64_t socket, base_hart, hart_count, socket_imsic_base, imsic_addr;
118
+ uint64_t socket_bits, hart_bits, guest_bits;
119
+
120
+ aia_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_RISCV_AIA, false);
121
+
122
+ if (aia_fd < 0) {
123
+ error_report("Unable to create in-kernel irqchip");
124
+ exit(1);
125
+ }
126
+
127
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
128
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
129
+ &default_aia_mode, false, NULL);
130
+ if (ret < 0) {
131
+ error_report("KVM AIA: failed to get current KVM AIA mode");
132
+ exit(1);
133
+ }
134
+ qemu_log("KVM AIA: default mode is %s\n",
135
+ kvm_aia_mode_str(default_aia_mode));
136
+
137
+ if (default_aia_mode != aia_mode) {
138
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
139
+ KVM_DEV_RISCV_AIA_CONFIG_MODE,
140
+ &aia_mode, true, NULL);
141
+ if (ret < 0)
142
+ warn_report("KVM AIA: failed to set KVM AIA mode");
143
+ else
144
+ qemu_log("KVM AIA: set current mode to %s\n",
145
+ kvm_aia_mode_str(aia_mode));
146
+ }
147
+
148
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
149
+ KVM_DEV_RISCV_AIA_CONFIG_SRCS,
150
+ &aia_irq_num, true, NULL);
151
+ if (ret < 0) {
152
+ error_report("KVM AIA: failed to set number of input irq lines");
153
+ exit(1);
154
+ }
155
+
156
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
157
+ KVM_DEV_RISCV_AIA_CONFIG_IDS,
158
+ &aia_msi_num, true, NULL);
159
+ if (ret < 0) {
160
+ error_report("KVM AIA: failed to set number of msi");
161
+ exit(1);
162
+ }
163
+
164
+ socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1;
165
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
166
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS,
167
+ &socket_bits, true, NULL);
168
+ if (ret < 0) {
169
+ error_report("KVM AIA: failed to set group_bits");
170
+ exit(1);
171
+ }
172
+
173
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
174
+ KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT,
175
+ &group_shift, true, NULL);
176
+ if (ret < 0) {
177
+ error_report("KVM AIA: failed to set group_shift");
178
+ exit(1);
179
+ }
180
+
181
+ guest_bits = guest_num == 0 ? 0 :
182
+ find_last_bit(&guest_num, BITS_PER_LONG) + 1;
183
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
184
+ KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS,
185
+ &guest_bits, true, NULL);
186
+ if (ret < 0) {
187
+ error_report("KVM AIA: failed to set guest_bits");
188
+ exit(1);
189
+ }
190
+
191
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
192
+ KVM_DEV_RISCV_AIA_ADDR_APLIC,
193
+ &aplic_base, true, NULL);
194
+ if (ret < 0) {
195
+ error_report("KVM AIA: failed to set the base address of APLIC");
196
+ exit(1);
197
+ }
198
+
199
+ for (socket = 0; socket < socket_count; socket++) {
200
+ socket_imsic_base = imsic_base + socket * (1U << group_shift);
201
+ hart_count = riscv_socket_hart_count(machine, socket);
202
+ base_hart = riscv_socket_first_hartid(machine, socket);
203
+
204
+ if (max_hart_per_socket < hart_count) {
205
+ max_hart_per_socket = hart_count;
206
+ }
207
+
208
+ for (i = 0; i < hart_count; i++) {
209
+ imsic_addr = socket_imsic_base + i * IMSIC_HART_SIZE(guest_bits);
210
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
211
+ KVM_DEV_RISCV_AIA_ADDR_IMSIC(i + base_hart),
212
+ &imsic_addr, true, NULL);
213
+ if (ret < 0) {
214
+ error_report("KVM AIA: failed to set the IMSIC address for hart %d", i);
215
+ exit(1);
216
+ }
217
+ }
218
+ }
219
+
220
+ hart_bits = find_last_bit(&max_hart_per_socket, BITS_PER_LONG) + 1;
221
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
222
+ KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
223
+ &hart_bits, true, NULL);
224
+ if (ret < 0) {
225
+ error_report("KVM AIA: failed to set hart_bits");
226
+ exit(1);
227
+ }
228
+
229
+ if (kvm_has_gsi_routing()) {
230
+ for (uint64_t idx = 0; idx < aia_irq_num + 1; ++idx) {
231
+ /* KVM AIA only has one APLIC instance */
232
+ kvm_irqchip_add_irq_route(kvm_state, idx, 0, idx);
233
+ }
234
+ kvm_gsi_routing_allowed = true;
235
+ kvm_irqchip_commit_routes(kvm_state);
236
+ }
237
+
238
+ ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
239
+ KVM_DEV_RISCV_AIA_CTRL_INIT,
240
+ NULL, true, NULL);
241
+ if (ret < 0) {
242
+ error_report("KVM AIA: initialized fail");
243
+ exit(1);
244
+ }
245
+
246
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
45
}
247
}
46
--
248
--
47
2.48.1
249
2.41.0
diff view generated by jsdifflib
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
When the APLIC is emulated in the kernel, the GPIO output lines to CPUs
3
KVM AIA can't emulate APLIC only. When "aia=aplic" parameter is passed,
4
can be remove. In this case the APLIC trigger CPU interrupts by KVM APIs.
4
APLIC devices is emulated by QEMU. For "aia=aplic-imsic", remove the
5
5
mmio operations of APLIC when using KVM AIA and send wired interrupt
6
This patch also move the code that claim the CPU interrupts to the
6
signal via KVM_IRQ_LINE API.
7
beginning of APLIC realization. This can avoid the unnecessary resource
7
After KVM AIA enabled, MSI messages are delivered by KVM_SIGNAL_MSI API
8
allocation before checking failed.
8
when the IMSICs receive mmio write requests.
9
9
10
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
10
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
11
Reviewed-by: Jim Shu <jim.shu@sifive.com>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Message-ID: <20250224025722.3999-3-yongxuan.wang@sifive.com>
13
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
14
Message-ID: <20230727102439.22554-5-yongxuan.wang@sifive.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
16
---
15
hw/intc/riscv_aplic.c | 49 +++++++++++++++++++++++--------------------
17
hw/intc/riscv_aplic.c | 56 ++++++++++++++++++++++++++++++-------------
16
1 file changed, 26 insertions(+), 23 deletions(-)
18
hw/intc/riscv_imsic.c | 25 +++++++++++++++----
19
2 files changed, 61 insertions(+), 20 deletions(-)
17
20
18
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
21
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
19
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/intc/riscv_aplic.c
23
--- a/hw/intc/riscv_aplic.c
21
+++ b/hw/intc/riscv_aplic.c
24
+++ b/hw/intc/riscv_aplic.c
25
@@ -XXX,XX +XXX,XX @@
26
#include "hw/irq.h"
27
#include "target/riscv/cpu.h"
28
#include "sysemu/sysemu.h"
29
+#include "sysemu/kvm.h"
30
#include "migration/vmstate.h"
31
32
#define APLIC_MAX_IDC (1UL << 14)
33
@@ -XXX,XX +XXX,XX @@
34
35
#define APLIC_IDC_CLAIMI 0x1c
36
37
+/*
38
+ * KVM AIA only supports APLIC MSI, fallback to QEMU emulation if we want to use
39
+ * APLIC Wired.
40
+ */
41
+static bool is_kvm_aia(bool msimode)
42
+{
43
+ return kvm_irqchip_in_kernel() && msimode;
44
+}
45
+
46
static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic,
47
uint32_t word)
48
{
49
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
50
return topi;
51
}
52
53
+static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
54
+{
55
+ kvm_set_irq(kvm_state, irq, !!level);
56
+}
57
+
58
static void riscv_aplic_request(void *opaque, int irq, int level)
59
{
60
bool update = false;
22
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
61
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
62
uint32_t i;
23
RISCVAPLICState *aplic = RISCV_APLIC(dev);
63
RISCVAPLICState *aplic = RISCV_APLIC(dev);
24
64
25
if (riscv_use_emulated_aplic(aplic->msimode)) {
65
- aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
26
+ /* Create output IRQ lines for non-MSI mode */
66
- aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
67
- aplic->state = g_new0(uint32_t, aplic->num_irqs);
68
- aplic->target = g_new0(uint32_t, aplic->num_irqs);
69
- if (!aplic->msimode) {
70
- for (i = 0; i < aplic->num_irqs; i++) {
71
- aplic->target[i] = 1;
72
+ if (!is_kvm_aia(aplic->msimode)) {
73
+ aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
74
+ aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
75
+ aplic->state = g_new0(uint32_t, aplic->num_irqs);
76
+ aplic->target = g_new0(uint32_t, aplic->num_irqs);
27
+ if (!aplic->msimode) {
77
+ if (!aplic->msimode) {
28
+ /* Claim the CPU interrupt to be triggered by this APLIC */
78
+ for (i = 0; i < aplic->num_irqs; i++) {
29
+ for (i = 0; i < aplic->num_harts; i++) {
79
+ aplic->target[i] = 1;
30
+ RISCVCPU *cpu;
31
+
32
+ cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
33
+ if (riscv_cpu_claim_interrupts(cpu,
34
+ (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
35
+ error_report("%s already claimed",
36
+ (aplic->mmode) ? "MEIP" : "SEIP");
37
+ exit(1);
38
+ }
39
+ }
40
+
41
+ aplic->external_irqs = g_malloc(sizeof(qemu_irq) *
42
+ aplic->num_harts);
43
+ qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
44
+ }
45
+
46
aplic->bitfield_words = (aplic->num_irqs + 31) >> 5;
47
aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs);
48
aplic->state = g_new0(uint32_t, aplic->num_irqs);
49
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
50
}
51
}
52
53
- /* Create output IRQ lines for non-MSI mode */
54
- if (!aplic->msimode) {
55
- aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts);
56
- qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts);
57
-
58
- /* Claim the CPU interrupt to be triggered by this APLIC */
59
- for (i = 0; i < aplic->num_harts; i++) {
60
- RISCVCPU *cpu = RISCV_CPU(cpu_by_arch_id(aplic->hartid_base + i));
61
- if (riscv_cpu_claim_interrupts(cpu,
62
- (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) {
63
- error_report("%s already claimed",
64
- (aplic->mmode) ? "MEIP" : "SEIP");
65
- exit(1);
66
- }
67
- }
68
- }
69
-
70
msi_nonbroken = true;
71
}
72
73
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
74
75
if (riscv_use_emulated_aplic(msimode)) {
76
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
77
- }
78
79
- if (!msimode) {
80
- for (i = 0; i < num_harts; i++) {
81
- CPUState *cpu = cpu_by_arch_id(hartid_base + i);
82
+ if (!msimode) {
83
+ for (i = 0; i < num_harts; i++) {
84
+ CPUState *cpu = cpu_by_arch_id(hartid_base + i);
85
86
- qdev_connect_gpio_out_named(dev, NULL, i,
87
- qdev_get_gpio_in(DEVICE(cpu),
88
+ qdev_connect_gpio_out_named(dev, NULL, i,
89
+ qdev_get_gpio_in(DEVICE(cpu),
90
(mmode) ? IRQ_M_EXT : IRQ_S_EXT));
91
+ }
80
+ }
92
}
81
}
82
- }
83
- aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
84
- aplic->iforce = g_new0(uint32_t, aplic->num_harts);
85
- aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
86
+ aplic->idelivery = g_new0(uint32_t, aplic->num_harts);
87
+ aplic->iforce = g_new0(uint32_t, aplic->num_harts);
88
+ aplic->ithreshold = g_new0(uint32_t, aplic->num_harts);
89
90
- memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic,
91
- TYPE_RISCV_APLIC, aplic->aperture_size);
92
- sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
93
+ memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops,
94
+ aplic, TYPE_RISCV_APLIC, aplic->aperture_size);
95
+ sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio);
96
+ }
97
98
/*
99
* Only root APLICs have hardware IRQ lines. All non-root APLICs
100
* have IRQ lines delegated by their parent APLIC.
101
*/
102
if (!aplic->parent) {
103
- qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
104
+ if (is_kvm_aia(aplic->msimode)) {
105
+ qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
106
+ } else {
107
+ qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
108
+ }
93
}
109
}
94
110
111
/* Create output IRQ lines for non-MSI mode */
112
@@ -XXX,XX +XXX,XX @@ DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size,
113
qdev_prop_set_bit(dev, "mmode", mmode);
114
115
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
116
- sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
117
+
118
+ if (!is_kvm_aia(msimode)) {
119
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr);
120
+ }
121
122
if (parent) {
123
riscv_aplic_add_child(parent, dev);
124
diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c
125
index XXXXXXX..XXXXXXX 100644
126
--- a/hw/intc/riscv_imsic.c
127
+++ b/hw/intc/riscv_imsic.c
128
@@ -XXX,XX +XXX,XX @@
129
#include "target/riscv/cpu.h"
130
#include "target/riscv/cpu_bits.h"
131
#include "sysemu/sysemu.h"
132
+#include "sysemu/kvm.h"
133
#include "migration/vmstate.h"
134
135
#define IMSIC_MMIO_PAGE_LE 0x00
136
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_write(void *opaque, hwaddr addr, uint64_t value,
137
goto err;
138
}
139
140
+#if defined(CONFIG_KVM)
141
+ if (kvm_irqchip_in_kernel()) {
142
+ struct kvm_msi msi;
143
+
144
+ msi.address_lo = extract64(imsic->mmio.addr + addr, 0, 32);
145
+ msi.address_hi = extract64(imsic->mmio.addr + addr, 32, 32);
146
+ msi.data = le32_to_cpu(value);
147
+
148
+ kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
149
+
150
+ return;
151
+ }
152
+#endif
153
+
154
/* Writes only supported for MSI little-endian registers */
155
page = addr >> IMSIC_MMIO_PAGE_SHIFT;
156
if ((addr & (IMSIC_MMIO_PAGE_SZ - 1)) == IMSIC_MMIO_PAGE_LE) {
157
@@ -XXX,XX +XXX,XX @@ static void riscv_imsic_realize(DeviceState *dev, Error **errp)
158
CPUState *cpu = cpu_by_arch_id(imsic->hartid);
159
CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
160
161
- imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
162
- imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
163
- imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
164
- imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
165
+ if (!kvm_irqchip_in_kernel()) {
166
+ imsic->num_eistate = imsic->num_pages * imsic->num_irqs;
167
+ imsic->eidelivery = g_new0(uint32_t, imsic->num_pages);
168
+ imsic->eithreshold = g_new0(uint32_t, imsic->num_pages);
169
+ imsic->eistate = g_new0(uint32_t, imsic->num_eistate);
170
+ }
171
172
memory_region_init_io(&imsic->mmio, OBJECT(dev), &riscv_imsic_ops,
173
imsic, TYPE_RISCV_IMSIC,
95
--
174
--
96
2.48.1
175
2.41.0
diff view generated by jsdifflib
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
1
From: Yong-Xuan Wang <yongxuan.wang@sifive.com>
2
2
3
Let kvm_msicfgaddr use the same format with mmsicfgaddr and smsicfgaddr.
3
Select KVM AIA when the host kernel has in-kernel AIA chip support.
4
Since KVM AIA only has one APLIC instance, we map the QEMU APLIC
5
devices to KVM APLIC.
4
6
5
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
7
Signed-off-by: Yong-Xuan Wang <yongxuan.wang@sifive.com>
8
Reviewed-by: Jim Shu <jim.shu@sifive.com>
6
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Message-ID: <20250224025722.3999-4-yongxuan.wang@sifive.com>
10
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
11
Message-ID: <20230727102439.22554-6-yongxuan.wang@sifive.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
13
---
10
hw/intc/riscv_aplic.c | 24 +++++++++++++-----------
14
hw/riscv/virt.c | 94 +++++++++++++++++++++++++++++++++----------------
11
1 file changed, 13 insertions(+), 11 deletions(-)
15
1 file changed, 63 insertions(+), 31 deletions(-)
12
16
13
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
17
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
14
index XXXXXXX..XXXXXXX 100644
18
index XXXXXXX..XXXXXXX 100644
15
--- a/hw/intc/riscv_aplic.c
19
--- a/hw/riscv/virt.c
16
+++ b/hw/intc/riscv_aplic.c
20
+++ b/hw/riscv/virt.c
17
@@ -XXX,XX +XXX,XX @@ void riscv_aplic_set_kvm_msicfgaddr(RISCVAPLICState *aplic, hwaddr addr)
21
@@ -XXX,XX +XXX,XX @@
22
#include "hw/riscv/virt.h"
23
#include "hw/riscv/boot.h"
24
#include "hw/riscv/numa.h"
25
+#include "kvm_riscv.h"
26
#include "hw/intc/riscv_aclint.h"
27
#include "hw/intc/riscv_aplic.h"
28
#include "hw/intc/riscv_imsic.h"
29
@@ -XXX,XX +XXX,XX @@
30
#error "Can't accommodate all IMSIC groups in address space"
31
#endif
32
33
+/* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. */
34
+static bool virt_use_kvm_aia(RISCVVirtState *s)
35
+{
36
+ return kvm_irqchip_in_kernel() && s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC;
37
+}
38
+
39
static const MemMapEntry virt_memmap[] = {
40
[VIRT_DEBUG] = { 0x0, 0x100 },
41
[VIRT_MROM] = { 0x1000, 0xf000 },
42
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
43
uint32_t *intc_phandles,
44
uint32_t aplic_phandle,
45
uint32_t aplic_child_phandle,
46
- bool m_mode)
47
+ bool m_mode, int num_harts)
18
{
48
{
19
#ifdef CONFIG_KVM
49
int cpu;
20
if (riscv_use_emulated_aplic(aplic->msimode)) {
50
char *aplic_name;
21
+ addr >>= APLIC_xMSICFGADDR_PPN_SHIFT;
51
uint32_t *aplic_cells;
22
aplic->kvm_msicfgaddr = extract64(addr, 0, 32);
52
MachineState *ms = MACHINE(s);
23
- aplic->kvm_msicfgaddrH = extract64(addr, 32, 32);
53
24
+ aplic->kvm_msicfgaddrH = extract64(addr, 32, 32) &
54
- aplic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2);
25
+ APLIC_xMSICFGADDRH_VALID_MASK;
55
+ aplic_cells = g_new0(uint32_t, num_harts * 2);
56
57
- for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) {
58
+ for (cpu = 0; cpu < num_harts; cpu++) {
59
aplic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]);
60
aplic_cells[cpu * 2 + 1] = cpu_to_be32(m_mode ? IRQ_M_EXT : IRQ_S_EXT);
26
}
61
}
27
#endif
62
@@ -XXX,XX +XXX,XX @@ static void create_fdt_one_aplic(RISCVVirtState *s, int socket,
28
}
63
29
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
64
if (s->aia_type == VIRT_AIA_TYPE_APLIC) {
65
qemu_fdt_setprop(ms->fdt, aplic_name, "interrupts-extended",
66
- aplic_cells,
67
- s->soc[socket].num_harts * sizeof(uint32_t) * 2);
68
+ aplic_cells, num_harts * sizeof(uint32_t) * 2);
69
} else {
70
qemu_fdt_setprop_cell(ms->fdt, aplic_name, "msi-parent", msi_phandle);
71
}
72
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
73
uint32_t msi_s_phandle,
74
uint32_t *phandle,
75
uint32_t *intc_phandles,
76
- uint32_t *aplic_phandles)
77
+ uint32_t *aplic_phandles,
78
+ int num_harts)
79
{
80
char *aplic_name;
81
unsigned long aplic_addr;
82
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
83
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_M].size,
84
msi_m_phandle, intc_phandles,
85
aplic_m_phandle, aplic_s_phandle,
86
- true);
87
+ true, num_harts);
88
}
89
90
/* S-level APLIC node */
91
@@ -XXX,XX +XXX,XX @@ static void create_fdt_socket_aplic(RISCVVirtState *s,
92
create_fdt_one_aplic(s, socket, aplic_addr, memmap[VIRT_APLIC_S].size,
93
msi_s_phandle, intc_phandles,
94
aplic_s_phandle, 0,
95
- false);
96
+ false, num_harts);
97
98
aplic_name = g_strdup_printf("/soc/aplic@%lx", aplic_addr);
99
100
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
101
*msi_pcie_phandle = msi_s_phandle;
102
}
103
104
- phandle_pos = ms->smp.cpus;
105
- for (socket = (socket_count - 1); socket >= 0; socket--) {
106
- phandle_pos -= s->soc[socket].num_harts;
107
-
108
- if (s->aia_type == VIRT_AIA_TYPE_NONE) {
109
- create_fdt_socket_plic(s, memmap, socket, phandle,
110
- &intc_phandles[phandle_pos], xplic_phandles);
111
- } else {
112
- create_fdt_socket_aplic(s, memmap, socket,
113
- msi_m_phandle, msi_s_phandle, phandle,
114
- &intc_phandles[phandle_pos], xplic_phandles);
115
+ /* KVM AIA only has one APLIC instance */
116
+ if (virt_use_kvm_aia(s)) {
117
+ create_fdt_socket_aplic(s, memmap, 0,
118
+ msi_m_phandle, msi_s_phandle, phandle,
119
+ &intc_phandles[0], xplic_phandles,
120
+ ms->smp.cpus);
121
+ } else {
122
+ phandle_pos = ms->smp.cpus;
123
+ for (socket = (socket_count - 1); socket >= 0; socket--) {
124
+ phandle_pos -= s->soc[socket].num_harts;
125
+
126
+ if (s->aia_type == VIRT_AIA_TYPE_NONE) {
127
+ create_fdt_socket_plic(s, memmap, socket, phandle,
128
+ &intc_phandles[phandle_pos],
129
+ xplic_phandles);
130
+ } else {
131
+ create_fdt_socket_aplic(s, memmap, socket,
132
+ msi_m_phandle, msi_s_phandle, phandle,
133
+ &intc_phandles[phandle_pos],
134
+ xplic_phandles,
135
+ s->soc[socket].num_harts);
136
+ }
30
}
137
}
31
}
138
}
32
139
33
- if (aplic->mmode) {
140
g_free(intc_phandles);
34
- msicfgaddr = aplic_m->mmsicfgaddr;
141
35
- msicfgaddrH = aplic_m->mmsicfgaddrH;
142
- for (socket = 0; socket < socket_count; socket++) {
36
+ if (aplic->kvm_splitmode) {
143
- if (socket == 0) {
37
+ msicfgaddr = aplic->kvm_msicfgaddr;
144
- *irq_mmio_phandle = xplic_phandles[socket];
38
+ msicfgaddrH = ((uint64_t)aplic->kvm_msicfgaddrH << 32);
145
- *irq_virtio_phandle = xplic_phandles[socket];
39
} else {
146
- *irq_pcie_phandle = xplic_phandles[socket];
40
- msicfgaddr = aplic_m->smsicfgaddr;
147
- }
41
- msicfgaddrH = aplic_m->smsicfgaddrH;
148
- if (socket == 1) {
42
+ if (aplic->mmode) {
149
- *irq_virtio_phandle = xplic_phandles[socket];
43
+ msicfgaddr = aplic_m->mmsicfgaddr;
150
- *irq_pcie_phandle = xplic_phandles[socket];
44
+ msicfgaddrH = aplic_m->mmsicfgaddrH;
151
- }
45
+ } else {
152
- if (socket == 2) {
46
+ msicfgaddr = aplic_m->smsicfgaddr;
153
- *irq_pcie_phandle = xplic_phandles[socket];
47
+ msicfgaddrH = aplic_m->smsicfgaddrH;
154
+ if (virt_use_kvm_aia(s)) {
48
+ }
155
+ *irq_mmio_phandle = xplic_phandles[0];
156
+ *irq_virtio_phandle = xplic_phandles[0];
157
+ *irq_pcie_phandle = xplic_phandles[0];
158
+ } else {
159
+ for (socket = 0; socket < socket_count; socket++) {
160
+ if (socket == 0) {
161
+ *irq_mmio_phandle = xplic_phandles[socket];
162
+ *irq_virtio_phandle = xplic_phandles[socket];
163
+ *irq_pcie_phandle = xplic_phandles[socket];
164
+ }
165
+ if (socket == 1) {
166
+ *irq_virtio_phandle = xplic_phandles[socket];
167
+ *irq_pcie_phandle = xplic_phandles[socket];
168
+ }
169
+ if (socket == 2) {
170
+ *irq_pcie_phandle = xplic_phandles[socket];
171
+ }
172
}
49
}
173
}
50
174
51
lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) &
175
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
52
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
176
}
53
addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs));
177
}
54
addr <<= APLIC_xMSICFGADDR_PPN_SHIFT;
178
55
179
+ if (virt_use_kvm_aia(s)) {
56
- if (aplic->kvm_splitmode) {
180
+ kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
57
- addr |= aplic->kvm_msicfgaddr;
181
+ VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
58
- addr |= ((uint64_t)aplic->kvm_msicfgaddrH << 32);
182
+ memmap[VIRT_APLIC_S].base,
59
- }
183
+ memmap[VIRT_IMSIC_S].base,
60
-
184
+ s->aia_guests);
61
address_space_stl_le(&address_space_memory, addr,
185
+ }
62
eiid, MEMTXATTRS_UNSPECIFIED, &result);
186
+
63
if (result != MEMTX_OK) {
187
if (riscv_is_32bit(&s->soc[0])) {
188
#if HOST_LONG_BITS == 64
189
/* limit RAM size in a 32-bit system */
64
--
190
--
65
2.48.1
191
2.41.0
diff view generated by jsdifflib
1
From: Vasilis Liaskovitis <vliaskovitis@suse.com>
1
From: Conor Dooley <conor.dooley@microchip.com>
2
2
3
Add an "aliases" node with a "serial0" entry for the single UART
3
On a dtb dumped from the virt machine, dt-validate complains:
4
in the riscv virt machine.
4
soc: pmu: {'riscv,event-to-mhpmcounters': [[1, 1, 524281], [2, 2, 524284], [65561, 65561, 524280], [65563, 65563, 524280], [65569, 65569, 524280]], 'compatible': ['riscv,pmu']} should not be valid under {'type': 'object'}
5
from schema $id: http://devicetree.org/schemas/simple-bus.yaml#
6
That's pretty cryptic, but running the dtb back through dtc produces
7
something a lot more reasonable:
8
Warning (simple_bus_reg): /soc/pmu: missing or empty reg/ranges property
5
9
6
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2774
10
Moving the riscv,pmu node out of the soc bus solves the problem.
7
Signed-off-by: Vasilis Liaskovitis <vliaskovitis@suse.com>
11
8
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
12
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250116161007.39710-1-vliaskovitis@suse.com>
14
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
15
Message-ID: <20230727-groom-decline-2c57ce42841c@spud>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
17
---
13
hw/riscv/virt.c | 3 +++
18
hw/riscv/virt.c | 2 +-
14
1 file changed, 3 insertions(+)
19
1 file changed, 1 insertion(+), 1 deletion(-)
15
20
16
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
21
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
17
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
18
--- a/hw/riscv/virt.c
23
--- a/hw/riscv/virt.c
19
+++ b/hw/riscv/virt.c
24
+++ b/hw/riscv/virt.c
20
@@ -XXX,XX +XXX,XX @@ static void create_fdt_uart(RISCVVirtState *s, const MemMapEntry *memmap,
25
@@ -XXX,XX +XXX,XX @@ static void create_fdt_pmu(RISCVVirtState *s)
21
}
26
MachineState *ms = MACHINE(s);
22
27
RISCVCPU hart = s->soc[0].harts[0];
23
qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", name);
28
24
+ qemu_fdt_setprop_string(ms->fdt, "/aliases", "serial0", name);
29
- pmu_name = g_strdup_printf("/soc/pmu");
25
}
30
+ pmu_name = g_strdup_printf("/pmu");
26
31
qemu_fdt_add_subnode(ms->fdt, pmu_name);
27
static void create_fdt_rtc(RISCVVirtState *s, const MemMapEntry *memmap,
32
qemu_fdt_setprop_string(ms->fdt, pmu_name, "compatible", "riscv,pmu");
28
@@ -XXX,XX +XXX,XX @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap)
33
riscv_pmu_generate_fdt_node(ms->fdt, hart.cfg.pmu_num, pmu_name);
29
qemu_fdt_setprop(ms->fdt, "/chosen", "rng-seed",
30
rng_seed, sizeof(rng_seed));
31
32
+ qemu_fdt_add_subnode(ms->fdt, "/aliases");
33
+
34
create_fdt_flash(s, memmap);
35
create_fdt_fw_cfg(s, memmap);
36
create_fdt_pmu(s);
37
--
34
--
38
2.48.1
35
2.41.0
diff view generated by jsdifflib
1
From: julia <midnight@trainwit.ch>
1
From: Weiwei Li <liweiwei@iscas.ac.cn>
2
2
3
For instance, QEMUs newer than b6ecc63c569bb88c0fcadf79fb92bf4b88aefea8
3
The Svadu specification updated the name of the *envcfg bit from
4
would silently treat this akin to an unmapped page (as required by the
4
HADE to ADUE.
5
RISC-V spec, admittedly). However, not all hardware platforms do (e.g.
6
CVA6) which leads to an apparent QEMU bug.
7
5
8
Instead, log a guest error so that in future, incorrectly set up page
6
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
9
tables can be debugged without bisecting QEMU.
7
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
10
11
Signed-off-by: julia <midnight@trainwit.ch>
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Message-ID: <20250203061852.2931556-1-midnight@trainwit.ch>
9
Message-ID: <20230816141916.66898-1-liweiwei@iscas.ac.cn>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
11
---
16
target/riscv/cpu_helper.c | 27 ++++++++++++++++++++++++++-
12
target/riscv/cpu_bits.h | 8 ++++----
17
1 file changed, 26 insertions(+), 1 deletion(-)
13
target/riscv/cpu.c | 4 ++--
14
target/riscv/cpu_helper.c | 6 +++---
15
target/riscv/csr.c | 12 ++++++------
16
4 files changed, 15 insertions(+), 15 deletions(-)
18
17
18
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu_bits.h
21
+++ b/target/riscv/cpu_bits.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
23
#define MENVCFG_CBIE (3UL << 4)
24
#define MENVCFG_CBCFE BIT(6)
25
#define MENVCFG_CBZE BIT(7)
26
-#define MENVCFG_HADE (1ULL << 61)
27
+#define MENVCFG_ADUE (1ULL << 61)
28
#define MENVCFG_PBMTE (1ULL << 62)
29
#define MENVCFG_STCE (1ULL << 63)
30
31
/* For RV32 */
32
-#define MENVCFGH_HADE BIT(29)
33
+#define MENVCFGH_ADUE BIT(29)
34
#define MENVCFGH_PBMTE BIT(30)
35
#define MENVCFGH_STCE BIT(31)
36
37
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
38
#define HENVCFG_CBIE MENVCFG_CBIE
39
#define HENVCFG_CBCFE MENVCFG_CBCFE
40
#define HENVCFG_CBZE MENVCFG_CBZE
41
-#define HENVCFG_HADE MENVCFG_HADE
42
+#define HENVCFG_ADUE MENVCFG_ADUE
43
#define HENVCFG_PBMTE MENVCFG_PBMTE
44
#define HENVCFG_STCE MENVCFG_STCE
45
46
/* For RV32 */
47
-#define HENVCFGH_HADE MENVCFGH_HADE
48
+#define HENVCFGH_ADUE MENVCFGH_ADUE
49
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
50
#define HENVCFGH_STCE MENVCFGH_STCE
51
52
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
53
index XXXXXXX..XXXXXXX 100644
54
--- a/target/riscv/cpu.c
55
+++ b/target/riscv/cpu.c
56
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)
57
env->two_stage_lookup = false;
58
59
env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
60
- (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
61
+ (cpu->cfg.ext_svadu ? MENVCFG_ADUE : 0);
62
env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
63
- (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
64
+ (cpu->cfg.ext_svadu ? HENVCFG_ADUE : 0);
65
66
/* Initialized default priorities of local interrupts. */
67
for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
19
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
68
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
20
index XXXXXXX..XXXXXXX 100644
69
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu_helper.c
70
--- a/target/riscv/cpu_helper.c
22
+++ b/target/riscv/cpu_helper.c
71
+++ b/target/riscv/cpu_helper.c
23
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
72
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
24
ppn = pte >> PTE_PPN_SHIFT;
73
}
25
} else {
74
26
if (pte & PTE_RESERVED) {
75
bool pbmte = env->menvcfg & MENVCFG_PBMTE;
27
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
76
- bool hade = env->menvcfg & MENVCFG_HADE;
28
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
77
+ bool adue = env->menvcfg & MENVCFG_ADUE;
29
+ __func__, pte_addr, pte);
78
30
return TRANSLATE_FAIL;
79
if (first_stage && two_stage && env->virt_enabled) {
31
}
80
pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
32
81
- hade = hade && (env->henvcfg & HENVCFG_HADE);
33
if (!pbmte && (pte & PTE_PBMT)) {
82
+ adue = adue && (env->henvcfg & HENVCFG_ADUE);
34
+ /* Reserved without Svpbmt. */
83
}
35
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
84
36
+ "and Svpbmt extension is disabled: "
85
int ptshift = (levels - 1) * ptidxbits;
37
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
86
@@ -XXX,XX +XXX,XX @@ restart:
38
+ __func__, pte_addr, pte);
87
39
return TRANSLATE_FAIL;
88
/* Page table updates need to be atomic with MTTCG enabled */
40
}
89
if (updated_pte != pte && !is_debug) {
41
90
- if (!hade) {
42
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
91
+ if (!adue) {
43
+ /* Reserved without Svnapot extension */
44
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
45
+ "and Svnapot extension is disabled: "
46
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
47
+ __func__, pte_addr, pte);
48
return TRANSLATE_FAIL;
49
}
50
51
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
52
/* Invalid PTE */
53
return TRANSLATE_FAIL;
92
return TRANSLATE_FAIL;
54
}
93
}
55
+
94
56
if (pte & (PTE_R | PTE_W | PTE_X)) {
95
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
57
goto leaf;
96
index XXXXXXX..XXXXXXX 100644
58
}
97
--- a/target/riscv/csr.c
59
98
+++ b/target/riscv/csr.c
60
- /* Inner PTE, continue walking */
99
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
61
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
100
if (riscv_cpu_mxl(env) == MXL_RV64) {
62
+ /* D, A, and U bits are reserved in non-leaf/inner PTEs */
101
mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
63
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
102
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
64
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
103
- (cfg->ext_svadu ? MENVCFG_HADE : 0);
65
+ __func__, pte_addr, pte);
104
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0);
66
return TRANSLATE_FAIL;
67
}
68
+ /* Inner PTE, continue walking */
69
base = ppn << PGSHIFT;
70
}
105
}
71
106
env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
72
@@ -XXX,XX +XXX,XX @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
107
73
leaf:
108
@@ -XXX,XX +XXX,XX @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
74
if (ppn & ((1ULL << ptshift) - 1)) {
109
const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
75
/* Misaligned PPN */
110
uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
76
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
111
(cfg->ext_sstc ? MENVCFG_STCE : 0) |
77
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
112
- (cfg->ext_svadu ? MENVCFG_HADE : 0);
78
+ __func__, pte_addr, pte);
113
+ (cfg->ext_svadu ? MENVCFG_ADUE : 0);
79
return TRANSLATE_FAIL;
114
uint64_t valh = (uint64_t)val << 32;
115
116
env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
117
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
118
* henvcfg.stce is read_only 0 when menvcfg.stce = 0
119
* henvcfg.hade is read_only 0 when menvcfg.hade = 0
120
*/
121
- *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
122
+ *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
123
env->menvcfg);
124
return RISCV_EXCP_NONE;
125
}
126
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
80
}
127
}
81
if (!pbmte && (pte & PTE_PBMT)) {
128
82
/* Reserved without Svpbmt. */
129
if (riscv_cpu_mxl(env) == MXL_RV64) {
83
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
130
- mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE);
84
+ "and Svpbmt extension is disabled: "
131
+ mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE);
85
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
86
+ __func__, pte_addr, pte);
87
return TRANSLATE_FAIL;
88
}
132
}
89
133
134
env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
135
@@ -XXX,XX +XXX,XX @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
136
return ret;
137
}
138
139
- *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) |
140
+ *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_ADUE) |
141
env->menvcfg)) >> 32;
142
return RISCV_EXCP_NONE;
143
}
144
@@ -XXX,XX +XXX,XX @@ static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
145
target_ulong val)
146
{
147
uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE |
148
- HENVCFG_HADE);
149
+ HENVCFG_ADUE);
150
uint64_t valh = (uint64_t)val << 32;
151
RISCVException ret;
152
90
--
153
--
91
2.48.1
154
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
The current 'parent' mechanic for profiles allows for one profile to be
3
In the same emulated RISC-V host, the 'host' KVM CPU takes 4 times
4
a child of a previous/older profile, enabling all its extensions (and
4
longer to boot than the 'rv64' KVM CPU.
5
the parent profile itself) and sparing us from tediously listing all
6
extensions for every profile.
7
5
8
This works fine for u-mode profiles. For s-mode profiles this is not
6
The reason is an unintended behavior of riscv_cpu_satp_mode_finalize()
9
enough: a s-mode profile extends not only his equivalent u-mode profile
7
when satp_mode.supported = 0, i.e. when cpu_init() does not set
10
but also the previous s-mode profile. This means, for example, that
8
satp_mode_max_supported(). satp_mode_max_from_map(map) does:
11
RVA23S64 extends both RVA23U64 and RVA22S64.
12
9
13
To fit this usage, rename the existing 'parent' to 'u_parent' and add a
10
31 - __builtin_clz(map)
14
new 's_parent' attribute for profiles. Handle both like we were doing
15
with the previous 'parent' attribute, i.e. if set, enable it. This
16
change does nothing for the existing profiles but will make RVA23S64
17
simpler.
18
11
19
Suggested-by: Andrew Jones <ajones@ventanamicro.com>
12
This means that, if satp_mode.supported = 0, satp_mode_supported_max
13
wil be '31 - 32'. But this is C, so satp_mode_supported_max will gladly
14
set it to UINT_MAX (4294967295). After that, if the user didn't set a
15
satp_mode, set_satp_mode_default_map(cpu) will make
16
17
cfg.satp_mode.map = cfg.satp_mode.supported
18
19
So satp_mode.map = 0. And then satp_mode_map_max will be set to
20
satp_mode_max_from_map(cpu->cfg.satp_mode.map), i.e. also UINT_MAX. The
21
guard "satp_mode_map_max > satp_mode_supported_max" doesn't protect us
22
here since both are UINT_MAX.
23
24
And finally we have 2 loops:
25
26
for (int i = satp_mode_map_max - 1; i >= 0; --i) {
27
28
Which are, in fact, 2 loops from UINT_MAX -1 to -1. This is where the
29
extra delay when booting the 'host' CPU is coming from.
30
31
Commit 43d1de32f8 already set a precedence for satp_mode.supported = 0
32
in a different manner. We're doing the same here. If supported == 0,
33
interpret as 'the CPU wants the OS to handle satp mode alone' and skip
34
satp_mode_finalize().
35
36
We'll also put a guard in satp_mode_max_from_map() to assert out if map
37
is 0 since the function is not ready to deal with it.
38
39
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
40
Fixes: 6f23aaeb9b ("riscv: Allow user to set the satp mode")
20
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
41
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
21
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
42
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
22
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
43
Message-ID: <20230817152903.694926-1-dbarboza@ventanamicro.com>
23
Message-ID: <20250115184316.2344583-4-dbarboza@ventanamicro.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
44
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
25
---
45
---
26
target/riscv/cpu.h | 3 ++-
46
target/riscv/cpu.c | 23 ++++++++++++++++++++---
27
target/riscv/cpu.c | 6 ++++--
47
1 file changed, 20 insertions(+), 3 deletions(-)
28
target/riscv/tcg/tcg-cpu.c | 35 ++++++++++++++++++++++++++---------
29
3 files changed, 32 insertions(+), 12 deletions(-)
30
48
31
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/cpu.h
34
+++ b/target/riscv/cpu.h
35
@@ -XXX,XX +XXX,XX @@ const char *riscv_get_misa_ext_description(uint32_t bit);
36
#define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop)
37
38
typedef struct riscv_cpu_profile {
39
- struct riscv_cpu_profile *parent;
40
+ struct riscv_cpu_profile *u_parent;
41
+ struct riscv_cpu_profile *s_parent;
42
const char *name;
43
uint32_t misa_ext;
44
bool enabled;
45
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
49
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
46
index XXXXXXX..XXXXXXX 100644
50
index XXXXXXX..XXXXXXX 100644
47
--- a/target/riscv/cpu.c
51
--- a/target/riscv/cpu.c
48
+++ b/target/riscv/cpu.c
52
+++ b/target/riscv/cpu.c
49
@@ -XXX,XX +XXX,XX @@ static const PropertyInfo prop_marchid = {
53
@@ -XXX,XX +XXX,XX @@ static uint8_t satp_mode_from_str(const char *satp_mode_str)
50
* doesn't need to be manually enabled by the profile.
54
51
*/
55
uint8_t satp_mode_max_from_map(uint32_t map)
52
static RISCVCPUProfile RVA22U64 = {
56
{
53
- .parent = NULL,
57
+ /*
54
+ .u_parent = NULL,
58
+ * 'map = 0' will make us return (31 - 32), which C will
55
+ .s_parent = NULL,
59
+ * happily overflow to UINT_MAX. There's no good result to
56
.name = "rva22u64",
60
+ * return if 'map = 0' (e.g. returning 0 will be ambiguous
57
.misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVB | RVU,
61
+ * with the result for 'map = 1').
58
.priv_spec = RISCV_PROFILE_ATTR_UNUSED,
62
+ *
59
@@ -XXX,XX +XXX,XX @@ static RISCVCPUProfile RVA22U64 = {
63
+ * Assert out if map = 0. Callers will have to deal with
60
* The remaining features/extensions comes from RVA22U64.
64
+ * it outside of this function.
61
*/
65
+ */
62
static RISCVCPUProfile RVA22S64 = {
66
+ g_assert(map > 0);
63
- .parent = &RVA22U64,
67
+
64
+ .u_parent = &RVA22U64,
68
/* map here has at least one bit set, so no problem with clz */
65
+ .s_parent = NULL,
69
return 31 - __builtin_clz(map);
66
.name = "rva22s64",
67
.misa_ext = RVS,
68
.priv_spec = PRIV_VERSION_1_12_0,
69
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
70
index XXXXXXX..XXXXXXX 100644
71
--- a/target/riscv/tcg/tcg-cpu.c
72
+++ b/target/riscv/tcg/tcg-cpu.c
73
@@ -XXX,XX +XXX,XX @@ static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
74
}
70
}
75
#endif
71
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
76
72
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
77
+static void riscv_cpu_check_parent_profile(RISCVCPU *cpu,
73
{
78
+ RISCVCPUProfile *profile,
74
bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
79
+ RISCVCPUProfile *parent)
75
- uint8_t satp_mode_map_max;
80
+{
76
- uint8_t satp_mode_supported_max =
81
+ const char *parent_name;
77
- satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
82
+ bool parent_enabled;
78
+ uint8_t satp_mode_map_max, satp_mode_supported_max;
83
+
79
+
84
+ if (!profile->enabled || !parent) {
80
+ /* The CPU wants the OS to decide which satp mode to use */
81
+ if (cpu->cfg.satp_mode.supported == 0) {
85
+ return;
82
+ return;
86
+ }
83
+ }
87
+
84
+
88
+ parent_name = parent->name;
85
+ satp_mode_supported_max =
89
+ parent_enabled = object_property_get_bool(OBJECT(cpu), parent_name, NULL);
86
+ satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
90
+ profile->enabled = parent_enabled;
87
91
+}
88
if (cpu->cfg.satp_mode.map == 0) {
92
+
89
if (cpu->cfg.satp_mode.init == 0) {
93
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
94
RISCVCPUProfile *profile)
95
{
96
CPURISCVState *env = &cpu->env;
97
const char *warn_msg = "Profile %s mandates disabled extension %s";
98
bool send_warn = profile->user_set && profile->enabled;
99
- bool parent_enabled, profile_impl = true;
100
+ bool profile_impl = true;
101
int i;
102
103
#ifndef CONFIG_USER_ONLY
104
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
105
106
profile->enabled = profile_impl;
107
108
- if (profile->parent != NULL) {
109
- parent_enabled = object_property_get_bool(OBJECT(cpu),
110
- profile->parent->name,
111
- NULL);
112
- profile->enabled = profile->enabled && parent_enabled;
113
- }
114
+ riscv_cpu_check_parent_profile(cpu, profile, profile->u_parent);
115
+ riscv_cpu_check_parent_profile(cpu, profile, profile->s_parent);
116
}
117
118
static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
119
@@ -XXX,XX +XXX,XX @@ static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
120
profile->user_set = true;
121
profile->enabled = value;
122
123
- if (profile->parent != NULL) {
124
- object_property_set_bool(obj, profile->parent->name,
125
+ if (profile->u_parent != NULL) {
126
+ object_property_set_bool(obj, profile->u_parent->name,
127
+ profile->enabled, NULL);
128
+ }
129
+
130
+ if (profile->s_parent != NULL) {
131
+ object_property_set_bool(obj, profile->s_parent->name,
132
profile->enabled, NULL);
133
}
134
135
--
90
--
136
2.48.1
91
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Vineet Gupta <vineetg@rivosinc.com>
2
2
3
ssu64xl is defined in RVA22 as:
3
zicond is now codegen supported in both llvm and gcc.
4
4
5
"sstatus.UXL must be capable of holding the value 2 (i.e., UXLEN=64 must
5
This change allows seamless enabling/testing of zicond in downstream
6
be supported)."
6
projects. e.g. currently riscv-gnu-toolchain parses elf attributes
7
to create a cmdline for qemu but fails short of enabling it because of
8
the "x-" prefix.
7
9
8
This is always true in TCG and it's mandatory for RVA23, so claim
10
Signed-off-by: Vineet Gupta <vineetg@rivosinc.com>
9
support for it.
11
Message-ID: <20230808181715.436395-1-vineetg@rivosinc.com>
10
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
13
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
14
Message-ID: <20250115184316.2344583-2-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
14
---
17
target/riscv/cpu.c | 1 +
15
target/riscv/cpu.c | 2 +-
18
tests/data/acpi/riscv64/virt/RHCT | Bin 390 -> 398 bytes
16
1 file changed, 1 insertion(+), 1 deletion(-)
19
2 files changed, 1 insertion(+)
20
17
21
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
18
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
22
index XXXXXXX..XXXXXXX 100644
19
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/cpu.c
20
--- a/target/riscv/cpu.c
24
+++ b/target/riscv/cpu.c
21
+++ b/target/riscv/cpu.c
25
@@ -XXX,XX +XXX,XX @@ const RISCVIsaExtData isa_edata_arr[] = {
22
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
26
ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
23
DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
27
ISA_EXT_DATA_ENTRY(sstvala, PRIV_VERSION_1_12_0, has_priv_1_12),
24
DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
28
ISA_EXT_DATA_ENTRY(sstvecd, PRIV_VERSION_1_12_0, has_priv_1_12),
25
DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
29
+ ISA_EXT_DATA_ENTRY(ssu64xl, PRIV_VERSION_1_12_0, has_priv_1_12),
26
+ DEFINE_PROP_BOOL("zicond", RISCVCPU, cfg.ext_zicond, false),
30
ISA_EXT_DATA_ENTRY(supm, PRIV_VERSION_1_13_0, ext_supm),
27
31
ISA_EXT_DATA_ENTRY(svade, PRIV_VERSION_1_11_0, ext_svade),
28
/* Vendor-specific custom extensions */
32
ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
29
DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
33
diff --git a/tests/data/acpi/riscv64/virt/RHCT b/tests/data/acpi/riscv64/virt/RHCT
30
@@ -XXX,XX +XXX,XX @@ static Property riscv_cpu_extensions[] = {
34
index XXXXXXX..XXXXXXX 100644
31
DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
35
Binary files a/tests/data/acpi/riscv64/virt/RHCT and b/tests/data/acpi/riscv64/virt/RHCT differ
32
33
/* These are experimental so mark with 'x-' */
34
- DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
35
36
/* ePMP 0.9.3 */
37
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
36
--
38
--
37
2.48.1
39
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
Add a handful of trace events to allow for an easier time debugging the
3
A build with --enable-debug and without KVM will fail as follows:
4
HPM feature.
5
4
5
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_riscv_virt.c.o: in function `virt_machine_init':
6
./qemu/build/../hw/riscv/virt.c:1465: undefined reference to `kvm_riscv_aia_create'
7
8
This happens because the code block with "if virt_use_kvm_aia(s)" isn't
9
being ignored by the debug build, resulting in an undefined reference to
10
a KVM only function.
11
12
Add a 'kvm_enabled()' conditional together with virt_use_kvm_aia() will
13
make the compiler crop the kvm_riscv_aia_create() call entirely from a
14
non-KVM build. Note that adding the 'kvm_enabled()' conditional inside
15
virt_use_kvm_aia() won't fix the build because this function would need
16
to be inlined multiple times to make the compiler zero out the entire
17
block.
18
19
While we're at it, use kvm_enabled() in all instances where
20
virt_use_kvm_aia() is checked to allow the compiler to elide these other
21
kvm-only instances as well.
22
23
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
24
Fixes: dbdb99948e ("target/riscv: select KVM AIA in riscv virt machine")
6
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
25
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
26
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
8
Message-ID: <20250224190826.1858473-11-dbarboza@ventanamicro.com>
27
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
28
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
29
Message-ID: <20230830133503.711138-2-dbarboza@ventanamicro.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
30
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
31
---
11
hw/riscv/riscv-iommu-hpm.c | 10 ++++++++++
32
hw/riscv/virt.c | 6 +++---
12
hw/riscv/trace-events | 5 +++++
33
1 file changed, 3 insertions(+), 3 deletions(-)
13
2 files changed, 15 insertions(+)
14
34
15
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
35
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
16
index XXXXXXX..XXXXXXX 100644
36
index XXXXXXX..XXXXXXX 100644
17
--- a/hw/riscv/riscv-iommu-hpm.c
37
--- a/hw/riscv/virt.c
18
+++ b/hw/riscv/riscv-iommu-hpm.c
38
+++ b/hw/riscv/virt.c
19
@@ -XXX,XX +XXX,XX @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
39
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
20
const uint64_t ctr_prev = s->hpmcycle_prev;
21
const uint64_t ctr_val = s->hpmcycle_val;
22
23
+ trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);
24
+
25
if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
26
/*
27
* Counter should not increment if inhibit bit is set. We can't really
28
@@ -XXX,XX +XXX,XX @@ static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
29
cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
30
stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);
31
32
+ trace_riscv_iommu_hpm_incr_ctr(cntr_val);
33
+
34
/* Handle the overflow scenario. */
35
if (cntr_val == UINT64_MAX) {
36
/*
37
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
38
return;
39
}
40
}
40
41
41
+ trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);
42
/* KVM AIA only has one APLIC instance */
42
+
43
- if (virt_use_kvm_aia(s)) {
43
if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
44
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
44
/*
45
create_fdt_socket_aplic(s, memmap, 0,
45
* Cycle counter is enabled. Just start the timer again and update
46
msi_m_phandle, msi_s_phandle, phandle,
46
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
47
&intc_phandles[0], xplic_phandles,
47
const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
48
@@ -XXX,XX +XXX,XX @@ static void create_fdt_sockets(RISCVVirtState *s, const MemMapEntry *memmap,
48
const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
49
49
50
g_free(intc_phandles);
50
+ trace_riscv_iommu_hpm_cycle_write(ovf, val);
51
51
+
52
- if (virt_use_kvm_aia(s)) {
52
/*
53
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
53
* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
54
*irq_mmio_phandle = xplic_phandles[0];
54
*/
55
*irq_virtio_phandle = xplic_phandles[0];
55
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
56
*irq_pcie_phandle = xplic_phandles[0];
56
return;
57
@@ -XXX,XX +XXX,XX @@ static void virt_machine_init(MachineState *machine)
58
}
57
}
59
}
58
60
59
+ trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);
61
- if (virt_use_kvm_aia(s)) {
60
+
62
+ if (kvm_enabled() && virt_use_kvm_aia(s)) {
61
/* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
63
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
62
if (get_field(ovf, BIT(ctr_idx + 1)) &&
64
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
63
!get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
65
memmap[VIRT_APLIC_S].base,
64
diff --git a/hw/riscv/trace-events b/hw/riscv/trace-events
65
index XXXXXXX..XXXXXXX 100644
66
--- a/hw/riscv/trace-events
67
+++ b/hw/riscv/trace-events
68
@@ -XXX,XX +XXX,XX @@ riscv_iommu_sys_irq_sent(uint32_t vector) "IRQ sent to vector %u"
69
riscv_iommu_sys_msi_sent(uint32_t vector, uint64_t msi_addr, uint32_t msi_data, uint32_t result) "MSI sent to vector %u msi_addr 0x%"PRIx64" msi_data 0x%x result %u"
70
riscv_iommu_sys_reset_hold(int reset_type) "reset type %d"
71
riscv_iommu_pci_reset_hold(int reset_type) "reset type %d"
72
+riscv_iommu_hpm_read(uint64_t cycle, uint32_t inhibit, uint64_t ctr_prev, uint64_t ctr_val) "cycle 0x%"PRIx64" inhibit 0x%x ctr_prev 0x%"PRIx64" ctr_val 0x%"PRIx64
73
+riscv_iommu_hpm_incr_ctr(uint64_t cntr_val) "cntr_val 0x%"PRIx64
74
+riscv_iommu_hpm_iocntinh_cy(bool prev_cy_inh) "prev_cy_inh %d"
75
+riscv_iommu_hpm_cycle_write(uint32_t ovf, uint64_t val) "ovf 0x%x val 0x%"PRIx64
76
+riscv_iommu_hpm_evt_write(uint32_t ctr_idx, uint32_t ovf, uint64_t val) "ctr_idx 0x%x ovf 0x%x val 0x%"PRIx64
77
--
66
--
78
2.48.1
67
2.41.0
68
69
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
2
3
We're setting reset vals for KVM csrs during kvm_riscv_reset_vcpu(), but
3
Commit 6df0b37e2ab breaks a --enable-debug build in a non-KVM
4
in no particular order and missing some of them (like env->mstatus).
4
environment with the following error:
5
5
6
Create a helper to do that, unclogging reset_vcpu(), and initialize
6
/usr/bin/ld: libqemu-riscv64-softmmu.fa.p/hw_intc_riscv_aplic.c.o: in function `riscv_kvm_aplic_request':
7
env->mstatus as well. Keep the regs in the same order they appear in
7
./qemu/build/../hw/intc/riscv_aplic.c:486: undefined reference to `kvm_set_irq'
8
struct kvm_riscv_csr from the KVM UAPI, similar to what
8
collect2: error: ld returned 1 exit status
9
kvm_riscv_(get|put)_regs_csr are doing. This will make a bit easier to
9
10
add new KVM CSRs and to verify which values we're writing back to KVM
10
This happens because the debug build will poke into the
11
during vcpu reset.
11
'if (is_kvm_aia(aplic->msimode))' block and fail to find a reference to
12
the KVM only function riscv_kvm_aplic_request().
13
14
There are multiple solutions to fix this. We'll go with the same
15
solution from the previous patch, i.e. add a kvm_enabled() conditional
16
to filter out the block. But there's a catch: riscv_kvm_aplic_request()
17
is a local function that would end up being used if the compiler crops
18
the block, and this won't work. Quoting Richard Henderson's explanation
19
in [1]:
20
21
"(...) the compiler won't eliminate entire unused functions with -O0"
22
23
We'll solve it by moving riscv_kvm_aplic_request() to kvm.c and add its
24
declaration in kvm_riscv.h, where all other KVM specific public
25
functions are already declared. Other archs handles KVM specific code in
26
this manner and we expect to do the same from now on.
27
28
[1] https://lore.kernel.org/qemu-riscv/d2f1ad02-eb03-138f-9d08-db676deeed05@linaro.org/
12
29
13
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
30
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
31
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
15
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
32
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Message-ID: <20250224123120.1644186-3-dbarboza@ventanamicro.com>
33
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
34
Message-ID: <20230830133503.711138-3-dbarboza@ventanamicro.com>
17
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
35
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
18
---
36
---
19
target/riscv/kvm/kvm-cpu.c | 23 +++++++++++++++--------
37
target/riscv/kvm_riscv.h | 1 +
20
1 file changed, 15 insertions(+), 8 deletions(-)
38
hw/intc/riscv_aplic.c | 8 ++------
39
target/riscv/kvm.c | 5 +++++
40
3 files changed, 8 insertions(+), 6 deletions(-)
21
41
22
diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c
42
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
23
index XXXXXXX..XXXXXXX 100644
43
index XXXXXXX..XXXXXXX 100644
24
--- a/target/riscv/kvm/kvm-cpu.c
44
--- a/target/riscv/kvm_riscv.h
25
+++ b/target/riscv/kvm/kvm-cpu.c
45
+++ b/target/riscv/kvm_riscv.h
26
@@ -XXX,XX +XXX,XX @@ static int kvm_riscv_put_regs_core(CPUState *cs)
46
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift,
27
return ret;
47
uint64_t aia_irq_num, uint64_t aia_msi_num,
48
uint64_t aplic_base, uint64_t imsic_base,
49
uint64_t guest_num);
50
+void riscv_kvm_aplic_request(void *opaque, int irq, int level);
51
52
#endif
53
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
54
index XXXXXXX..XXXXXXX 100644
55
--- a/hw/intc/riscv_aplic.c
56
+++ b/hw/intc/riscv_aplic.c
57
@@ -XXX,XX +XXX,XX @@
58
#include "target/riscv/cpu.h"
59
#include "sysemu/sysemu.h"
60
#include "sysemu/kvm.h"
61
+#include "kvm_riscv.h"
62
#include "migration/vmstate.h"
63
64
#define APLIC_MAX_IDC (1UL << 14)
65
@@ -XXX,XX +XXX,XX @@ static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc)
66
return topi;
28
}
67
}
29
68
30
+static void kvm_riscv_reset_regs_csr(CPURISCVState *env)
69
-static void riscv_kvm_aplic_request(void *opaque, int irq, int level)
70
-{
71
- kvm_set_irq(kvm_state, irq, !!level);
72
-}
73
-
74
static void riscv_aplic_request(void *opaque, int irq, int level)
75
{
76
bool update = false;
77
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_realize(DeviceState *dev, Error **errp)
78
* have IRQ lines delegated by their parent APLIC.
79
*/
80
if (!aplic->parent) {
81
- if (is_kvm_aia(aplic->msimode)) {
82
+ if (kvm_enabled() && is_kvm_aia(aplic->msimode)) {
83
qdev_init_gpio_in(dev, riscv_kvm_aplic_request, aplic->num_irqs);
84
} else {
85
qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs);
86
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
87
index XXXXXXX..XXXXXXX 100644
88
--- a/target/riscv/kvm.c
89
+++ b/target/riscv/kvm.c
90
@@ -XXX,XX +XXX,XX @@
91
#include "sysemu/runstate.h"
92
#include "hw/riscv/numa.h"
93
94
+void riscv_kvm_aplic_request(void *opaque, int irq, int level)
31
+{
95
+{
32
+ env->mstatus = 0;
96
+ kvm_set_irq(kvm_state, irq, !!level);
33
+ env->mie = 0;
34
+ env->stvec = 0;
35
+ env->sscratch = 0;
36
+ env->sepc = 0;
37
+ env->scause = 0;
38
+ env->stval = 0;
39
+ env->mip = 0;
40
+ env->satp = 0;
41
+}
97
+}
42
+
98
+
43
static int kvm_riscv_get_regs_csr(CPUState *cs)
99
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
100
uint64_t idx)
44
{
101
{
45
CPURISCVState *env = &RISCV_CPU(cs)->env;
46
@@ -XXX,XX +XXX,XX @@ void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
47
env->pc = cpu->env.kernel_addr;
48
env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
49
env->gpr[11] = cpu->env.fdt_addr; /* a1 */
50
- env->satp = 0;
51
- env->mie = 0;
52
- env->stvec = 0;
53
- env->sscratch = 0;
54
- env->sepc = 0;
55
- env->scause = 0;
56
- env->stval = 0;
57
- env->mip = 0;
58
+
59
+ kvm_riscv_reset_regs_csr(env);
60
}
61
62
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
63
--
102
--
64
2.48.1
103
2.41.0
104
105
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Robbin Ehn <rehn@rivosinc.com>
2
2
3
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
3
This patch adds the new extensions in
4
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
4
linux 6.5 to the hwprobe syscall.
5
Message-ID: <20250224190826.1858473-12-dbarboza@ventanamicro.com>
5
6
And fixes RVC check to OR with correct value.
7
The previous variable contains 0 therefore it
8
did work.
9
10
Signed-off-by: Robbin Ehn <rehn@rivosinc.com>
11
Acked-by: Richard Henderson <richard.henderson@linaro.org>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <bc82203b72d7efb30f1b4a8f9eb3d94699799dc8.camel@rivosinc.com>
6
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
7
---
15
---
8
docs/specs/riscv-iommu.rst | 2 ++
16
linux-user/syscall.c | 14 +++++++++++++-
9
1 file changed, 2 insertions(+)
17
1 file changed, 13 insertions(+), 1 deletion(-)
10
18
11
diff --git a/docs/specs/riscv-iommu.rst b/docs/specs/riscv-iommu.rst
19
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
12
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
13
--- a/docs/specs/riscv-iommu.rst
21
--- a/linux-user/syscall.c
14
+++ b/docs/specs/riscv-iommu.rst
22
+++ b/linux-user/syscall.c
15
@@ -XXX,XX +XXX,XX @@ Several options are available to control the capabilities of the device, namely:
23
@@ -XXX,XX +XXX,XX @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
16
- "off" (Out-of-reset translation mode: 'on' for DMA disabled, 'off' for 'BARE' (passthrough))
24
#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
17
- "s-stage": enable s-stage support
25
#define RISCV_HWPROBE_IMA_FD (1 << 0)
18
- "g-stage": enable g-stage support
26
#define RISCV_HWPROBE_IMA_C (1 << 1)
19
+- "hpm-counters": number of hardware performance counters available. Maximum value is 31.
27
+#define RISCV_HWPROBE_IMA_V (1 << 2)
20
+ Default value is 31. Use 0 (zero) to disable HPM support
28
+#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
21
29
+#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
22
riscv-iommu-sys device
30
+#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
23
----------------------
31
32
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
33
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
34
@@ -XXX,XX +XXX,XX @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env,
35
riscv_has_ext(env, RVD) ?
36
RISCV_HWPROBE_IMA_FD : 0;
37
value |= riscv_has_ext(env, RVC) ?
38
- RISCV_HWPROBE_IMA_C : pair->value;
39
+ RISCV_HWPROBE_IMA_C : 0;
40
+ value |= riscv_has_ext(env, RVV) ?
41
+ RISCV_HWPROBE_IMA_V : 0;
42
+ value |= cfg->ext_zba ?
43
+ RISCV_HWPROBE_EXT_ZBA : 0;
44
+ value |= cfg->ext_zbb ?
45
+ RISCV_HWPROBE_EXT_ZBB : 0;
46
+ value |= cfg->ext_zbs ?
47
+ RISCV_HWPROBE_EXT_ZBS : 0;
48
__put_user(value, &pair->value);
49
break;
50
case RISCV_HWPROBE_KEY_CPUPERF_0:
24
--
51
--
25
2.48.1
52
2.41.0
diff view generated by jsdifflib
1
From: Clément Léger <cleger@rivosinc.com>
1
From: Ard Biesheuvel <ardb@kernel.org>
2
2
3
As raised by Richard Henderson, these warnings are displayed in user
3
Use the accelerated SubBytes/ShiftRows/AddRoundKey AES helper to
4
only as well. Since they aren't really useful for the end-user, remove
4
implement the first half of the key schedule derivation. This does not
5
them and add a "TODO" note in the leading comments.
5
actually involve shifting rows, so clone the same value into all four
6
columns of the AES vector to counter that operation.
6
7
7
Signed-off-by: Clément Léger <cleger@rivosinc.com>
8
Cc: Richard Henderson <richard.henderson@linaro.org>
8
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
9
Message-ID: <20250213145640.117275-1-cleger@rivosinc.com>
10
Cc: Palmer Dabbelt <palmer@dabbelt.com>
11
Cc: Alistair Francis <alistair.francis@wdc.com>
12
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
14
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
15
Message-ID: <20230831154118.138727-1-ardb@kernel.org>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
11
---
17
---
12
target/riscv/tcg/tcg-cpu.c | 8 +++-----
18
target/riscv/crypto_helper.c | 17 +++++------------
13
1 file changed, 3 insertions(+), 5 deletions(-)
19
1 file changed, 5 insertions(+), 12 deletions(-)
14
20
15
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
21
diff --git a/target/riscv/crypto_helper.c b/target/riscv/crypto_helper.c
16
index XXXXXXX..XXXXXXX 100644
22
index XXXXXXX..XXXXXXX 100644
17
--- a/target/riscv/tcg/tcg-cpu.c
23
--- a/target/riscv/crypto_helper.c
18
+++ b/target/riscv/tcg/tcg-cpu.c
24
+++ b/target/riscv/crypto_helper.c
19
@@ -XXX,XX +XXX,XX @@ static void riscv_init_max_cpu_extensions(Object *obj)
25
@@ -XXX,XX +XXX,XX @@ target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
26
27
uint8_t enc_rnum = rnum;
28
uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
29
- uint8_t rcon_ = 0;
30
- target_ulong result;
31
+ AESState t, rc = {};
32
33
if (enc_rnum != 0xA) {
34
temp = ror32(temp, 8); /* Rotate right by 8 */
35
- rcon_ = round_consts[enc_rnum];
36
+ rc.w[0] = rc.w[1] = round_consts[enc_rnum];
20
}
37
}
21
38
22
/*
39
- temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
23
- * ext_smrnmi requires OpenSBI changes that our current
40
- ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
24
+ * TODO: ext_smrnmi requires OpenSBI changes that our current
41
- ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
25
* image does not have. Disable it for now.
42
- ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
26
*/
43
+ t.w[0] = t.w[1] = t.w[2] = t.w[3] = temp;
27
if (cpu->cfg.ext_smrnmi) {
44
+ aesenc_SB_SR_AK(&t, &t, &rc, false);
28
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smrnmi), false);
45
29
- qemu_log("Smrnmi is disabled in the 'max' type CPU\n");
46
- temp ^= rcon_;
30
}
47
-
31
48
- result = ((uint64_t)temp << 32) | temp;
32
/*
49
-
33
- * ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup to
50
- return result;
34
- * avoid generating a double trap. OpenSBI does not currently support it,
51
+ return t.d[0];
35
+ * TODO: ext_smdbltrp requires the firmware to clear MSTATUS.MDT on startup
36
+ * to avoid generating a double trap. OpenSBI does not currently support it,
37
* disable it for now.
38
*/
39
if (cpu->cfg.ext_smdbltrp) {
40
isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_smdbltrp), false);
41
- qemu_log("Smdbltrp is disabled in the 'max' type CPU\n");
42
}
43
}
52
}
44
53
54
target_ulong HELPER(aes64im)(target_ulong rs1)
45
--
55
--
46
2.48.1
56
2.41.0
47
57
48
58
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Akihiko Odaki <akihiko.odaki@daynix.com>
2
2
3
The mcontrol select bit (19) is always zero, meaning our triggers will
3
riscv_trigger_init() had been called on reset events that can happen
4
always match virtual addresses. In this condition, if the user does not
4
several times for a CPU and it allocated timers for itrigger. If old
5
specify a size for the trigger, the access size defaults to XLEN.
5
timers were present, they were simply overwritten by the new timers,
6
resulting in a memory leak.
6
7
7
At this moment we're using def_size = 8 regardless of CPU XLEN. Use
8
Divide riscv_trigger_init() into two functions, namely
8
def_size = 4 in case we're running 32 bits.
9
riscv_trigger_realize() and riscv_trigger_reset() and call them in
10
appropriate timing. The timer allocation will happen only once for a
11
CPU in riscv_trigger_realize().
9
12
10
Fixes: 95799e36c1 ("target/riscv: Add initial support for the Sdtrig extension")
13
Fixes: 5a4ae64cac ("target/riscv: Add itrigger support when icount is enabled")
11
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
15
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
16
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
17
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
18
Message-ID: <20230818034059.9146-1-akihiko.odaki@daynix.com>
14
Message-ID: <20250121170626.1992570-2-dbarboza@ventanamicro.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
19
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
20
---
17
target/riscv/debug.c | 6 ++++--
21
target/riscv/debug.h | 3 ++-
18
1 file changed, 4 insertions(+), 2 deletions(-)
22
target/riscv/cpu.c | 8 +++++++-
23
target/riscv/debug.c | 15 ++++++++++++---
24
3 files changed, 21 insertions(+), 5 deletions(-)
19
25
26
diff --git a/target/riscv/debug.h b/target/riscv/debug.h
27
index XXXXXXX..XXXXXXX 100644
28
--- a/target/riscv/debug.h
29
+++ b/target/riscv/debug.h
30
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_debug_excp_handler(CPUState *cs);
31
bool riscv_cpu_debug_check_breakpoint(CPUState *cs);
32
bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
33
34
-void riscv_trigger_init(CPURISCVState *env);
35
+void riscv_trigger_realize(CPURISCVState *env);
36
+void riscv_trigger_reset_hold(CPURISCVState *env);
37
38
bool riscv_itrigger_enabled(CPURISCVState *env);
39
void riscv_itrigger_update_priv(CPURISCVState *env);
40
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
41
index XXXXXXX..XXXXXXX 100644
42
--- a/target/riscv/cpu.c
43
+++ b/target/riscv/cpu.c
44
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_reset_hold(Object *obj)
45
46
#ifndef CONFIG_USER_ONLY
47
if (cpu->cfg.debug) {
48
- riscv_trigger_init(env);
49
+ riscv_trigger_reset_hold(env);
50
}
51
52
if (kvm_enabled()) {
53
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
54
55
riscv_cpu_register_gdb_regs_for_features(cs);
56
57
+#ifndef CONFIG_USER_ONLY
58
+ if (cpu->cfg.debug) {
59
+ riscv_trigger_realize(&cpu->env);
60
+ }
61
+#endif
62
+
63
qemu_init_vcpu(cs);
64
cpu_reset(cs);
65
20
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
66
diff --git a/target/riscv/debug.c b/target/riscv/debug.c
21
index XXXXXXX..XXXXXXX 100644
67
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/debug.c
68
--- a/target/riscv/debug.c
23
+++ b/target/riscv/debug.c
69
+++ b/target/riscv/debug.c
24
@@ -XXX,XX +XXX,XX @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
70
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
25
bool enabled = type2_breakpoint_enabled(ctrl);
71
return false;
26
CPUState *cs = env_cpu(env);
72
}
27
int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
73
28
- uint32_t size;
74
-void riscv_trigger_init(CPURISCVState *env)
29
+ uint32_t size, def_size;
75
+void riscv_trigger_realize(CPURISCVState *env)
30
76
+{
31
if (!enabled) {
77
+ int i;
32
return;
33
@@ -XXX,XX +XXX,XX @@ static void type2_breakpoint_insert(CPURISCVState *env, target_ulong index)
34
cpu_watchpoint_insert(cs, addr, size, flags,
35
&env->cpu_watchpoint[index]);
36
} else {
37
- cpu_watchpoint_insert(cs, addr, 8, flags,
38
+ def_size = riscv_cpu_mxl(env) == MXL_RV64 ? 8 : 4;
39
+
78
+
40
+ cpu_watchpoint_insert(cs, addr, def_size, flags,
79
+ for (i = 0; i < RV_MAX_TRIGGERS; i++) {
41
&env->cpu_watchpoint[index]);
80
+ env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
42
}
81
+ riscv_itrigger_timer_cb, env);
82
+ }
83
+}
84
+
85
+void riscv_trigger_reset_hold(CPURISCVState *env)
86
{
87
target_ulong tdata1 = build_tdata1(env, TRIGGER_TYPE_AD_MATCH, 0, 0);
88
int i;
89
@@ -XXX,XX +XXX,XX @@ void riscv_trigger_init(CPURISCVState *env)
90
env->tdata3[i] = 0;
91
env->cpu_breakpoint[i] = NULL;
92
env->cpu_watchpoint[i] = NULL;
93
- env->itrigger_timer[i] = timer_new_ns(QEMU_CLOCK_VIRTUAL,
94
- riscv_itrigger_timer_cb, env);
95
+ timer_del(env->itrigger_timer[i]);
43
}
96
}
97
}
44
--
98
--
45
2.48.1
99
2.41.0
46
100
47
101
diff view generated by jsdifflib
1
From: Rob Bradford <rbradford@rivosinc.com>
1
From: Leon Schuermann <leons@opentitan.org>
2
2
3
When running in TOR mode (Top of Range) the next PMP entry controls
3
When the rule-lock bypass (RLB) bit is set in the mseccfg CSR, the PMP
4
whether the entry is locked. However simply checking if the PMP_LOCK bit
4
configuration lock bits must not apply. While this behavior is
5
is set is not sufficient with the Smepmp extension which now provides a
5
implemented for the pmpcfgX CSRs, this bit is not respected for
6
bit (mseccfg.RLB (Rule Lock Bypass)) to disregard the lock bits. In
6
changes to the pmpaddrX CSRs. This patch ensures that pmpaddrX CSR
7
order to respect this bit use the convenience pmp_is_locked() function
7
writes work even on locked regions when the global rule-lock bypass is
8
rather than directly checking PMP_LOCK since this function checks
8
enabled.
9
mseccfg.RLB.
10
9
11
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
10
Signed-off-by: Leon Schuermann <leons@opentitan.org>
11
Reviewed-by: Mayuresh Chitale <mchitale@ventanamicro.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
13
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Message-ID: <20230829215046.1430463-1-leon@is.currently.online>
14
Message-ID: <20250210153713.343626-1-rbradford@rivosinc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
15
---
17
target/riscv/pmp.c | 2 +-
16
target/riscv/pmp.c | 4 ++++
18
1 file changed, 1 insertion(+), 1 deletion(-)
17
1 file changed, 4 insertions(+)
19
18
20
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
19
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
21
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
22
--- a/target/riscv/pmp.c
21
--- a/target/riscv/pmp.c
23
+++ b/target/riscv/pmp.c
22
+++ b/target/riscv/pmp.c
24
@@ -XXX,XX +XXX,XX @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
23
@@ -XXX,XX +XXX,XX @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
25
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
24
*/
26
is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
25
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
27
26
{
28
- if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
27
+ /* mseccfg.RLB is set */
29
+ if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
28
+ if (MSECCFG_RLB_ISSET(env)) {
30
qemu_log_mask(LOG_GUEST_ERROR,
29
+ return 0;
31
"ignoring pmpaddr write - pmpcfg + 1 locked\n");
30
+ }
32
return;
31
32
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
33
return 1;
33
--
34
--
34
2.48.1
35
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Tommy Wu <tommy.wu@sifive.com>
2
2
3
Coverity found a DEADCODE issue in rmw_xireg() claiming that we can't
3
According to the new spec, when vsiselect has a reserved value, attempts
4
reach 'RISCV_EXCP_VIRT_INSTRUCTION_FAULT' at the 'done' label:
4
from M-mode or HS-mode to access vsireg, or from VS-mode to access
5
sireg, should preferably raise an illegal instruction exception.
5
6
6
done:
7
Signed-off-by: Tommy Wu <tommy.wu@sifive.com>
7
if (ret) {
8
Reviewed-by: Frank Chang <frank.chang@sifive.com>
8
return (env->virt_enabled && virt) ?
9
Message-ID: <20230816061647.600672-1-tommy.wu@sifive.com>
9
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
10
}
11
return RISCV_EXCP_NONE;
12
13
This happens because the 'virt' flag, which is only used by 'done', is
14
set to 'false' and it will always remain 'false' in any condition where
15
we'll jump to 'done':
16
17
switch (csrno) {
18
(...)
19
case CSR_VSIREG:
20
isel = env->vsiselect;
21
virt = true;
22
break;
23
default:
24
goto done;
25
};
26
27
'virt = true' will never reach 'done' because we have a if/else-if/else
28
block right before the label that will always return:
29
30
if (xiselect_aia_range(isel)) {
31
return ...
32
} else if (...) {
33
return ...
34
} else {
35
return RISCV_EXCP_ILLEGAL_INST;
36
}
37
38
All this means that we can preserve the current logic by reducing the
39
'done' label to:
40
41
done:
42
if (ret) {
43
return RISCV_EXCP_ILLEGAL_INST;
44
}
45
return RISCV_EXCP_NONE;
46
47
The flag 'virt' is now unused. Remove it.
48
49
Fix the 'goto done' identation while we're at it.
50
51
Resolves: Coverity CID 1590359
52
Fixes: dc0280723d ("target/riscv: Decouple AIA processing from xiselect and xireg")
53
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
54
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
55
Message-ID: <20250121184847.2109128-2-dbarboza@ventanamicro.com>
56
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
57
---
11
---
58
target/riscv/csr.c | 7 ++-----
12
target/riscv/csr.c | 7 +++++--
59
1 file changed, 2 insertions(+), 5 deletions(-)
13
1 file changed, 5 insertions(+), 2 deletions(-)
60
14
61
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
15
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
62
index XXXXXXX..XXXXXXX 100644
16
index XXXXXXX..XXXXXXX 100644
63
--- a/target/riscv/csr.c
17
--- a/target/riscv/csr.c
64
+++ b/target/riscv/csr.c
18
+++ b/target/riscv/csr.c
65
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
19
@@ -XXX,XX +XXX,XX @@ static int rmw_iprio(target_ulong xlen,
66
target_ulong *val, target_ulong new_val,
20
static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
67
target_ulong wr_mask)
21
target_ulong new_val, target_ulong wr_mask)
68
{
22
{
69
- bool virt = false;
23
- bool virt;
24
+ bool virt, isel_reserved;
25
uint8_t *iprio;
70
int ret = -EINVAL;
26
int ret = -EINVAL;
71
target_ulong isel;
27
target_ulong priv, isel, vgein;
72
28
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
73
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
29
74
break;
30
/* Decode register details from CSR number */
75
case CSR_VSIREG:
31
virt = false;
76
isel = env->vsiselect;
32
+ isel_reserved = false;
77
- virt = true;
33
switch (csrno) {
78
break;
34
case CSR_MIREG:
79
default:
35
iprio = env->miprio;
80
- goto done;
36
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
81
+ goto done;
37
riscv_cpu_mxl_bits(env)),
82
};
38
val, new_val, wr_mask);
83
39
}
84
/*
40
+ } else {
85
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
41
+ isel_reserved = true;
42
}
86
43
87
done:
44
done:
88
if (ret) {
45
if (ret) {
89
- return (env->virt_enabled && virt) ?
46
- return (env->virt_enabled && virt) ?
90
- RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
47
+ return (env->virt_enabled && virt && !isel_reserved) ?
91
+ return RISCV_EXCP_ILLEGAL_INST;
48
RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
92
}
49
}
93
return RISCV_EXCP_NONE;
50
return RISCV_EXCP_NONE;
94
}
95
--
51
--
96
2.48.1
52
2.41.0
diff view generated by jsdifflib
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
1
From: Nikita Shubin <n.shubin@yadro.com>
2
2
3
Coverity found a second DEADCODE issue in rmw_xireg() claiming that we can't
3
As per ISA:
4
reach 'RISCV_EXCP_NONE' at the 'done' label:
5
4
6
> 2706 done:
5
"For CSRRWI, if rd=x0, then the instruction shall not read the CSR and
7
> 2707 if (ret) {
6
shall not cause any of the side effects that might occur on a CSR read."
8
> 2708 return (env->virt_enabled && virt) ?
9
> 2709 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
10
> 2710 }
11
>>>> CID 1590356: Control flow issues (DEADCODE)
12
>>>> Execution cannot reach this statement: "return RISCV_EXCP_NONE;".
13
> 2711 return RISCV_EXCP_NONE;
14
7
15
Our label is now reduced after fixing another deadcode in the previous
8
trans_csrrwi() and trans_csrrw() call do_csrw() if rd=x0, do_csrw() calls
16
patch but the problem reported here still remains:
9
riscv_csrrw_do64(), via helper_csrw() passing NULL as *ret_value.
17
10
18
done:
11
Signed-off-by: Nikita Shubin <n.shubin@yadro.com>
19
if (ret) {
20
return RISCV_EXCP_ILLEGAL_INST;
21
}
22
return RISCV_EXCP_NONE;
23
24
This happens because 'ret' changes only once at the start of the
25
function:
26
27
ret = smstateen_acc_ok(env, 0, SMSTATEEN0_SVSLCT);
28
if (ret != RISCV_EXCP_NONE) {
29
return ret;
30
}
31
32
So it's a guarantee that ret will be RISCV_EXCP_NONE (-1) if we ever
33
reach the label, i.e. "if (ret)" will always be true, and the label can
34
be even further reduced to:
35
36
done:
37
return RISCV_EXCP_ILLEGAL_INST;
38
39
To make a better use of the label, remove the 'else' from the
40
xiselect_aia_range() chain and let it fall-through to the 'done' label
41
since they are now both returning RISCV_EXCP_ILLEGAL_INST.
42
43
Resolves: Coverity CID 1590356
44
Fixes: dc0280723d ("target/riscv: Decouple AIA processing from xiselect and xireg")
45
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
46
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
12
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
47
Message-ID: <20250121184847.2109128-3-dbarboza@ventanamicro.com>
13
Message-ID: <20230808090914.17634-1-nikita.shubin@maquefel.me>
48
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
49
---
15
---
50
target/riscv/csr.c | 7 +------
16
target/riscv/csr.c | 24 +++++++++++++++---------
51
1 file changed, 1 insertion(+), 6 deletions(-)
17
1 file changed, 15 insertions(+), 9 deletions(-)
52
18
53
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
19
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
54
index XXXXXXX..XXXXXXX 100644
20
index XXXXXXX..XXXXXXX 100644
55
--- a/target/riscv/csr.c
21
--- a/target/riscv/csr.c
56
+++ b/target/riscv/csr.c
22
+++ b/target/riscv/csr.c
57
@@ -XXX,XX +XXX,XX @@ static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
23
@@ -XXX,XX +XXX,XX @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
58
} else if (riscv_cpu_cfg(env)->ext_smcsrind ||
24
target_ulong write_mask)
59
riscv_cpu_cfg(env)->ext_sscsrind) {
25
{
60
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
26
RISCVException ret;
61
- } else {
27
- target_ulong old_value;
62
- return RISCV_EXCP_ILLEGAL_INST;
28
+ target_ulong old_value = 0;
29
30
/* execute combined read/write operation if it exists */
31
if (csr_ops[csrno].op) {
32
return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
63
}
33
}
64
34
65
done:
35
- /* if no accessor exists then return failure */
66
- if (ret) {
36
- if (!csr_ops[csrno].read) {
67
- return RISCV_EXCP_ILLEGAL_INST;
37
- return RISCV_EXCP_ILLEGAL_INST;
68
- }
38
- }
69
- return RISCV_EXCP_NONE;
39
- /* read old value */
70
+ return RISCV_EXCP_ILLEGAL_INST;
40
- ret = csr_ops[csrno].read(env, csrno, &old_value);
71
}
41
- if (ret != RISCV_EXCP_NONE) {
72
42
- return ret;
73
static RISCVException rmw_xtopei(CPURISCVState *env, int csrno,
43
+ /*
44
+ * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
45
+ * and we can't throw side effects caused by CSR reads.
46
+ */
47
+ if (ret_value) {
48
+ /* if no accessor exists then return failure */
49
+ if (!csr_ops[csrno].read) {
50
+ return RISCV_EXCP_ILLEGAL_INST;
51
+ }
52
+ /* read old value */
53
+ ret = csr_ops[csrno].read(env, csrno, &old_value);
54
+ if (ret != RISCV_EXCP_NONE) {
55
+ return ret;
56
+ }
57
}
58
59
/* write value if writable and write mask set, otherwise drop writes */
74
--
60
--
75
2.48.1
61
2.41.0
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
Coverity found a DEADCODE issue in rmw_xiregi() claiming that we can't
4
reach 'RISCV_EXCP_VIRT_INSTRUCTION_FAULT' at the 'done' label:
5
6
> 2652 done:
7
>>>> CID 1590357: Control flow issues (DEADCODE)
8
>>>> Execution cannot reach the expression "RISCV_EXCP_VIRT_INSTRUCTION_FAULT"
9
inside this statement: "return (env->virt_enabled &...".
10
> 2653 return (env->virt_enabled && virt) ?
11
> 2654 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
12
13
This happens because 'virt' is being set to 'false' and it will remain
14
as 'false' in any code path where 'done' will be called. The label can
15
be safely reduced to:
16
17
done:
18
return RISCV_EXCP_ILLEGAL_INST;
19
20
And that will leave us with the following usage of a 'goto' skipping a
21
single 'return' to do another single 'return':
22
23
} else {
24
goto done;
25
}
26
27
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
28
29
done:
30
return RISCV_EXCP_ILLEGAL_INST;
31
32
Which we will eliminate it and just do 'return RISCV_EXCP_ILLEGAL_INST'
33
instead.
34
35
Resolves: Coverity CID 1590357
36
Fixes: 5e33a20827 ("target/riscv: Support generic CSR indirect access")
37
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
38
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
39
Message-ID: <20250121184847.2109128-4-dbarboza@ventanamicro.com>
40
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
41
---
42
target/riscv/csr.c | 8 +-------
43
1 file changed, 1 insertion(+), 7 deletions(-)
44
45
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
46
index XXXXXXX..XXXXXXX 100644
47
--- a/target/riscv/csr.c
48
+++ b/target/riscv/csr.c
49
@@ -XXX,XX +XXX,XX @@ static int rmw_xireg_csrind(CPURISCVState *env, int csrno,
50
static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
51
target_ulong new_val, target_ulong wr_mask)
52
{
53
- bool virt = false;
54
int ret = -EINVAL;
55
target_ulong isel;
56
57
@@ -XXX,XX +XXX,XX @@ static int rmw_xiregi(CPURISCVState *env, int csrno, target_ulong *val,
58
} else if (CSR_VSIREG <= csrno && csrno <= CSR_VSIREG6 &&
59
csrno != CSR_VSIREG4 - 1) {
60
isel = env->vsiselect;
61
- virt = true;
62
} else {
63
- goto done;
64
+ return RISCV_EXCP_ILLEGAL_INST;
65
}
66
67
return rmw_xireg_csrind(env, csrno, isel, val, new_val, wr_mask);
68
-
69
-done:
70
- return (env->virt_enabled && virt) ?
71
- RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
72
}
73
74
static RISCVException rmw_xireg(CPURISCVState *env, int csrno,
75
--
76
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
Coverity reported a BAD_SHIFT issue in the following code:
4
5
> 2097
6
>>>> CID 1590355: Integer handling issues (BAD_SHIFT)
7
>>>> In expression "hdeleg >> cause", right shifting by more than 63
8
bits has undefined behavior. The shift amount, "cause", is at least 64.
9
> 2098 vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
10
> 2099 /*
11
12
It is not clear to me how the tool guarantees that '"cause" is at least
13
64', but indeed there's no guarantees that it would be < 64 in the
14
'async = true' code path.
15
16
A simple fix to avoid a potential UB is to add a 'cause < 64' guard like
17
'mode' is already doing right before 'vsmode_exc'.
18
19
Resolves: Coverity CID 1590355
20
Fixes: 967760f62c ("target/riscv: Implement Ssdbltrp exception handling")
21
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
22
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
23
Message-ID: <20250121184847.2109128-6-dbarboza@ventanamicro.com>
24
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
25
---
26
target/riscv/cpu_helper.c | 4 +++-
27
1 file changed, 3 insertions(+), 1 deletion(-)
28
29
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/target/riscv/cpu_helper.c
32
+++ b/target/riscv/cpu_helper.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_cpu_do_interrupt(CPUState *cs)
34
mode = env->priv <= PRV_S && cause < 64 &&
35
(((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
36
37
- vsmode_exc = env->virt_enabled && (((hdeleg >> cause) & 1) || vs_injected);
38
+ vsmode_exc = env->virt_enabled && cause < 64 &&
39
+ (((hdeleg >> cause) & 1) || vs_injected);
40
+
41
/*
42
* Check double trap condition only if already in S-mode and targeting
43
* S-mode
44
--
45
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
In the RISC-V privileged ISA section 3.1.15 table 15, it is determined
4
that a debug exception that is triggered from a load/store has a higher
5
priority than a possible fault that this access might trigger.
6
7
This is not the case ATM as shown in [1]. Adding a breakpoint in an
8
address that deliberately will fault is causing a load page fault
9
instead of a debug exception. The reason is that we're throwing in the
10
page fault as soon as the fault occurs (end of riscv_cpu_tlb_fill(),
11
raise_mmu_exception()), not allowing the installed watchpoints to
12
trigger.
13
14
Call cpu_check_watchpoint() in the page fault path to search and execute
15
any watchpoints that might exist for the address, never returning back
16
to the fault path. If no watchpoints are found cpu_check_watchpoint()
17
will return and we'll fall-through the regular path to
18
raise_mmu_exception().
19
20
[1] https://gitlab.com/qemu-project/qemu/-/issues/2627
21
22
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2627
23
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
26
Message-ID: <20250121170626.1992570-3-dbarboza@ventanamicro.com>
27
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
28
---
29
target/riscv/cpu_helper.c | 18 ++++++++++++++++++
30
1 file changed, 18 insertions(+)
31
32
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
33
index XXXXXXX..XXXXXXX 100644
34
--- a/target/riscv/cpu_helper.c
35
+++ b/target/riscv/cpu_helper.c
36
@@ -XXX,XX +XXX,XX @@
37
#include "exec/page-protection.h"
38
#include "instmap.h"
39
#include "tcg/tcg-op.h"
40
+#include "hw/core/tcg-cpu-ops.h"
41
#include "trace.h"
42
#include "semihosting/common-semi.h"
43
#include "system/cpu-timers.h"
44
@@ -XXX,XX +XXX,XX @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
45
} else if (probe) {
46
return false;
47
} else {
48
+ int wp_access = 0;
49
+
50
+ if (access_type == MMU_DATA_LOAD) {
51
+ wp_access |= BP_MEM_READ;
52
+ } else if (access_type == MMU_DATA_STORE) {
53
+ wp_access |= BP_MEM_WRITE;
54
+ }
55
+
56
+ /*
57
+ * If a watchpoint isn't found for 'addr' this will
58
+ * be a no-op and we'll resume the mmu_exception path.
59
+ * Otherwise we'll throw a debug exception and execution
60
+ * will continue elsewhere.
61
+ */
62
+ cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
63
+ wp_access, retaddr);
64
+
65
raise_mmu_exception(env, address, access_type, pmp_violation,
66
first_stage_error, two_stage_lookup,
67
two_stage_indirect_error);
68
--
69
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Huang Borong <huangborong@bosc.ac.cn>
2
1
3
Remove the redundant masking of "hart_idx", as the same operation is
4
performed later during address calculation.
5
6
This change impacts the "hart_idx" value in the final qemu_log_mask()
7
call. The original "hart_idx" parameter should be used for logging to
8
ensure accuracy, rather than the masked value.
9
10
Signed-off-by: Huang Borong <huangborong@bosc.ac.cn>
11
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
12
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
13
Message-ID: <20250115035105.19600-1-huangborong@bosc.ac.cn>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
hw/intc/riscv_aplic.c | 1 -
17
1 file changed, 1 deletion(-)
18
19
diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c
20
index XXXXXXX..XXXXXXX 100644
21
--- a/hw/intc/riscv_aplic.c
22
+++ b/hw/intc/riscv_aplic.c
23
@@ -XXX,XX +XXX,XX @@ static void riscv_aplic_msi_send(RISCVAPLICState *aplic,
24
APLIC_xMSICFGADDRH_HHXW_MASK;
25
26
group_idx = hart_idx >> lhxw;
27
- hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw);
28
29
addr = msicfgaddr;
30
addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32;
31
--
32
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
The S profiles do a priv_ver check during validation to see if the
4
running priv_ver is compatible with it. This check is done by comparing
5
if the running priv_ver is equal to the priv_ver the profile specifies.
6
7
There is an universe where we added RVA23S64 support based on both
8
RVA23U64 and RVA22S64 and this error is being thrown:
9
10
qemu-system-riscv64: warning: Profile rva22s64 requires
11
priv spec v1.12.0, but priv ver v1.13.0 was set
12
13
We're enabling RVA22S64 (priv_ver 1.12) as a dependency of RVA23S64
14
(priv_ver 1.13) and complaining to users about what we did ourselves.
15
16
There's no drawback in allowing a profile to run in an env that has a
17
priv_ver newer than it's required by it. So, like Hiro Nakamura saves
18
the future by changing the past, change the priv_ver check now to allow
19
profiles to run in a newer priv_ver. This universe will have one less
20
warning to deal with.
21
22
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
23
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
24
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
25
Message-ID: <20250115184316.2344583-5-dbarboza@ventanamicro.com>
26
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
27
---
28
target/riscv/tcg/tcg-cpu.c | 2 +-
29
1 file changed, 1 insertion(+), 1 deletion(-)
30
31
diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c
32
index XXXXXXX..XXXXXXX 100644
33
--- a/target/riscv/tcg/tcg-cpu.c
34
+++ b/target/riscv/tcg/tcg-cpu.c
35
@@ -XXX,XX +XXX,XX @@ static void riscv_cpu_validate_profile(RISCVCPU *cpu,
36
#endif
37
38
if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
39
- profile->priv_spec != env->priv_ver) {
40
+ profile->priv_spec > env->priv_ver) {
41
profile_impl = false;
42
43
if (send_warn) {
44
--
45
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Alistair Francis <alistair23@gmail.com>
2
1
3
Bin Meng has been a long time contributor and maintainer for QEMU RISC-V
4
and has been very beneficial to the RISC-V ecosystem.
5
6
Unfortunately his email has started to bounce so this patch is removing
7
them from MAINTAINERS. If in the future Bin Meng wants to return we will
8
happily re-add them.
9
10
Note that I'm not removing Bin Meng as a "SD (Secure Card)" maintainer.
11
12
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
13
Acked-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
14
Message-ID: <20250128060546.1374394-1-alistair.francis@wdc.com>
15
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
16
---
17
MAINTAINERS | 5 +----
18
1 file changed, 1 insertion(+), 4 deletions(-)
19
20
diff --git a/MAINTAINERS b/MAINTAINERS
21
index XXXXXXX..XXXXXXX 100644
22
--- a/MAINTAINERS
23
+++ b/MAINTAINERS
24
@@ -XXX,XX +XXX,XX @@ F: tests/functional/test_ppc_74xx.py
25
RISC-V TCG CPUs
26
M: Palmer Dabbelt <palmer@dabbelt.com>
27
M: Alistair Francis <alistair.francis@wdc.com>
28
-M: Bin Meng <bmeng.cn@gmail.com>
29
R: Weiwei Li <liwei1518@gmail.com>
30
R: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
31
R: Liu Zhiwei <zhiwei_liu@linux.alibaba.com>
32
@@ -XXX,XX +XXX,XX @@ F: include/hw/riscv/opentitan.h
33
F: include/hw/*/ibex_*.h
34
35
Microchip PolarFire SoC Icicle Kit
36
-M: Bin Meng <bmeng.cn@gmail.com>
37
L: qemu-riscv@nongnu.org
38
S: Supported
39
F: docs/system/riscv/microchip-icicle-kit.rst
40
@@ -XXX,XX +XXX,XX @@ F: include/hw/char/shakti_uart.h
41
42
SiFive Machines
43
M: Alistair Francis <Alistair.Francis@wdc.com>
44
-M: Bin Meng <bmeng.cn@gmail.com>
45
M: Palmer Dabbelt <palmer@dabbelt.com>
46
L: qemu-riscv@nongnu.org
47
S: Supported
48
@@ -XXX,XX +XXX,XX @@ S: Orphan
49
F: hw/i386/amd_iommu.?
50
51
OpenSBI Firmware
52
-M: Bin Meng <bmeng.cn@gmail.com>
53
+L: qemu-riscv@nongnu.org
54
S: Supported
55
F: pc-bios/opensbi-*
56
F: .gitlab-ci.d/opensbi.yml
57
--
58
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
2
1
3
The Control Transfer Records (CTR) extension provides a method to
4
record a limited branch history in register-accessible internal chip
5
storage.
6
7
This extension is similar to Arch LBR in x86 and BRBE in ARM.
8
The Extension has been stable and the latest release can be found here
9
https://github.com/riscv/riscv-control-transfer-records/releases/tag/v1.0_rc5
10
11
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
12
Acked-by: Alistair Francis <alistair.francis@wdc.com>
13
Message-ID: <20250205-b4-ctr_upstream_v6-v6-2-439d8e06c8ef@rivosinc.com>
14
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
15
---
16
target/riscv/cpu_bits.h | 145 ++++++++++++++++++++++++++++++++++++++++
17
1 file changed, 145 insertions(+)
18
19
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
20
index XXXXXXX..XXXXXXX 100644
21
--- a/target/riscv/cpu_bits.h
22
+++ b/target/riscv/cpu_bits.h
23
@@ -XXX,XX +XXX,XX @@
24
#define CSR_SIEH 0x114
25
#define CSR_SIPH 0x154
26
27
+/* Machine-Level Control transfer records CSRs */
28
+#define CSR_MCTRCTL 0x34e
29
+
30
+/* Supervisor-Level Control transfer records CSRs */
31
+#define CSR_SCTRCTL 0x14e
32
+#define CSR_SCTRSTATUS 0x14f
33
+#define CSR_SCTRDEPTH 0x15f
34
+
35
+/* VS-Level Control transfer records CSRs */
36
+#define CSR_VSCTRCTL 0x24e
37
+
38
/* Hpervisor CSRs */
39
#define CSR_HSTATUS 0x600
40
#define CSR_HEDELEG 0x602
41
@@ -XXX,XX +XXX,XX @@
42
#define SMSTATEEN0_CS (1ULL << 0)
43
#define SMSTATEEN0_FCSR (1ULL << 1)
44
#define SMSTATEEN0_JVT (1ULL << 2)
45
+#define SMSTATEEN0_CTR (1ULL << 54)
46
#define SMSTATEEN0_P1P13 (1ULL << 56)
47
#define SMSTATEEN0_HSCONTXT (1ULL << 57)
48
#define SMSTATEEN0_IMSIC (1ULL << 58)
49
@@ -XXX,XX +XXX,XX @@ typedef enum RISCVException {
50
#define HENVCFGH_PBMTE MENVCFGH_PBMTE
51
#define HENVCFGH_STCE MENVCFGH_STCE
52
53
+/* Offsets for every pair of control bits per each priv level */
54
+#define XS_OFFSET 0ULL
55
+#define U_OFFSET 2ULL
56
+#define S_OFFSET 5ULL
57
+#define M_OFFSET 8ULL
58
+
59
+#define PM_XS_BITS (EXT_STATUS_MASK << XS_OFFSET)
60
+#define U_PM_ENABLE (PM_ENABLE << U_OFFSET)
61
+#define U_PM_CURRENT (PM_CURRENT << U_OFFSET)
62
+#define U_PM_INSN (PM_INSN << U_OFFSET)
63
+#define S_PM_ENABLE (PM_ENABLE << S_OFFSET)
64
+#define S_PM_CURRENT (PM_CURRENT << S_OFFSET)
65
+#define S_PM_INSN (PM_INSN << S_OFFSET)
66
+#define M_PM_ENABLE (PM_ENABLE << M_OFFSET)
67
+#define M_PM_CURRENT (PM_CURRENT << M_OFFSET)
68
+#define M_PM_INSN (PM_INSN << M_OFFSET)
69
+
70
+/* mmte CSR bits */
71
+#define MMTE_PM_XS_BITS PM_XS_BITS
72
+#define MMTE_U_PM_ENABLE U_PM_ENABLE
73
+#define MMTE_U_PM_CURRENT U_PM_CURRENT
74
+#define MMTE_U_PM_INSN U_PM_INSN
75
+#define MMTE_S_PM_ENABLE S_PM_ENABLE
76
+#define MMTE_S_PM_CURRENT S_PM_CURRENT
77
+#define MMTE_S_PM_INSN S_PM_INSN
78
+#define MMTE_M_PM_ENABLE M_PM_ENABLE
79
+#define MMTE_M_PM_CURRENT M_PM_CURRENT
80
+#define MMTE_M_PM_INSN M_PM_INSN
81
+#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \
82
+ MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \
83
+ MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \
84
+ MMTE_PM_XS_BITS)
85
+
86
+/* (v)smte CSR bits */
87
+#define SMTE_PM_XS_BITS PM_XS_BITS
88
+#define SMTE_U_PM_ENABLE U_PM_ENABLE
89
+#define SMTE_U_PM_CURRENT U_PM_CURRENT
90
+#define SMTE_U_PM_INSN U_PM_INSN
91
+#define SMTE_S_PM_ENABLE S_PM_ENABLE
92
+#define SMTE_S_PM_CURRENT S_PM_CURRENT
93
+#define SMTE_S_PM_INSN S_PM_INSN
94
+#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \
95
+ SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \
96
+ SMTE_PM_XS_BITS)
97
+
98
+/* umte CSR bits */
99
+#define UMTE_U_PM_ENABLE U_PM_ENABLE
100
+#define UMTE_U_PM_CURRENT U_PM_CURRENT
101
+#define UMTE_U_PM_INSN U_PM_INSN
102
+#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN)
103
+
104
+/* CTR control register commom fields */
105
+#define XCTRCTL_U BIT_ULL(0)
106
+#define XCTRCTL_S BIT_ULL(1)
107
+#define XCTRCTL_RASEMU BIT_ULL(7)
108
+#define XCTRCTL_STE BIT_ULL(8)
109
+#define XCTRCTL_BPFRZ BIT_ULL(11)
110
+#define XCTRCTL_LCOFIFRZ BIT_ULL(12)
111
+#define XCTRCTL_EXCINH BIT_ULL(33)
112
+#define XCTRCTL_INTRINH BIT_ULL(34)
113
+#define XCTRCTL_TRETINH BIT_ULL(35)
114
+#define XCTRCTL_NTBREN BIT_ULL(36)
115
+#define XCTRCTL_TKBRINH BIT_ULL(37)
116
+#define XCTRCTL_INDCALLINH BIT_ULL(40)
117
+#define XCTRCTL_DIRCALLINH BIT_ULL(41)
118
+#define XCTRCTL_INDJMPINH BIT_ULL(42)
119
+#define XCTRCTL_DIRJMPINH BIT_ULL(43)
120
+#define XCTRCTL_CORSWAPINH BIT_ULL(44)
121
+#define XCTRCTL_RETINH BIT_ULL(45)
122
+#define XCTRCTL_INDLJMPINH BIT_ULL(46)
123
+#define XCTRCTL_DIRLJMPINH BIT_ULL(47)
124
+
125
+#define XCTRCTL_MASK (XCTRCTL_U | XCTRCTL_S | XCTRCTL_RASEMU | \
126
+ XCTRCTL_STE | XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ | \
127
+ XCTRCTL_EXCINH | XCTRCTL_INTRINH | XCTRCTL_TRETINH | \
128
+ XCTRCTL_NTBREN | XCTRCTL_TKBRINH | XCTRCTL_INDCALLINH | \
129
+ XCTRCTL_DIRCALLINH | XCTRCTL_INDJMPINH | \
130
+ XCTRCTL_DIRJMPINH | XCTRCTL_CORSWAPINH | \
131
+ XCTRCTL_RETINH | XCTRCTL_INDLJMPINH | XCTRCTL_DIRLJMPINH)
132
+
133
+#define XCTRCTL_INH_START 32U
134
+
135
+/* CTR mctrctl bits */
136
+#define MCTRCTL_M BIT_ULL(2)
137
+#define MCTRCTL_MTE BIT_ULL(9)
138
+
139
+#define MCTRCTL_MASK (XCTRCTL_MASK | MCTRCTL_M | MCTRCTL_MTE)
140
+#define SCTRCTL_MASK XCTRCTL_MASK
141
+#define VSCTRCTL_MASK XCTRCTL_MASK
142
+
143
+/* sctrstatus CSR bits. */
144
+#define SCTRSTATUS_WRPTR_MASK 0xFF
145
+#define SCTRSTATUS_FROZEN BIT(31)
146
+#define SCTRSTATUS_MASK (SCTRSTATUS_WRPTR_MASK | SCTRSTATUS_FROZEN)
147
+
148
+/* sctrdepth CSR bits. */
149
+#define SCTRDEPTH_MASK 0x7
150
+#define SCTRDEPTH_MIN 0U /* 16 Entries. */
151
+#define SCTRDEPTH_MAX 4U /* 256 Entries. */
152
+
153
+#define CTR_ENTRIES_FIRST 0x200
154
+#define CTR_ENTRIES_LAST 0x2ff
155
+
156
+#define CTRSOURCE_VALID BIT(0)
157
+#define CTRTARGET_MISP BIT(0)
158
+
159
+#define CTRDATA_TYPE_MASK 0xF
160
+#define CTRDATA_CCV BIT(15)
161
+#define CTRDATA_CCM_MASK 0xFFF0000
162
+#define CTRDATA_CCE_MASK 0xF0000000
163
+
164
+#define CTRDATA_MASK (CTRDATA_TYPE_MASK | CTRDATA_CCV | \
165
+ CTRDATA_CCM_MASK | CTRDATA_CCE_MASK)
166
+
167
+typedef enum CTRType {
168
+ CTRDATA_TYPE_NONE = 0,
169
+ CTRDATA_TYPE_EXCEPTION = 1,
170
+ CTRDATA_TYPE_INTERRUPT = 2,
171
+ CTRDATA_TYPE_EXCEP_INT_RET = 3,
172
+ CTRDATA_TYPE_NONTAKEN_BRANCH = 4,
173
+ CTRDATA_TYPE_TAKEN_BRANCH = 5,
174
+ CTRDATA_TYPE_RESERVED_0 = 6,
175
+ CTRDATA_TYPE_RESERVED_1 = 7,
176
+ CTRDATA_TYPE_INDIRECT_CALL = 8,
177
+ CTRDATA_TYPE_DIRECT_CALL = 9,
178
+ CTRDATA_TYPE_INDIRECT_JUMP = 10,
179
+ CTRDATA_TYPE_DIRECT_JUMP = 11,
180
+ CTRDATA_TYPE_CO_ROUTINE_SWAP = 12,
181
+ CTRDATA_TYPE_RETURN = 13,
182
+ CTRDATA_TYPE_OTHER_INDIRECT_JUMP = 14,
183
+ CTRDATA_TYPE_OTHER_DIRECT_JUMP = 15,
184
+} CTRType;
185
+
186
/* MISELECT, SISELECT, and VSISELECT bits (AIA) */
187
#define ISELECT_IPRIO0 0x30
188
#define ISELECT_IPRIO15 0x3f
189
--
190
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
2
1
3
Add a subsection to machine.c to migrate CTR CSR state
4
5
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
6
Acked-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250205-b4-ctr_upstream_v6-v6-6-439d8e06c8ef@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
target/riscv/machine.c | 25 +++++++++++++++++++++++++
11
1 file changed, 25 insertions(+)
12
13
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/target/riscv/machine.c
16
+++ b/target/riscv/machine.c
17
@@ -XXX,XX +XXX,XX @@ static const VMStateDescription vmstate_envcfg = {
18
}
19
};
20
21
+static bool ctr_needed(void *opaque)
22
+{
23
+ RISCVCPU *cpu = opaque;
24
+
25
+ return cpu->cfg.ext_smctr || cpu->cfg.ext_ssctr;
26
+}
27
+
28
+static const VMStateDescription vmstate_ctr = {
29
+ .name = "cpu/ctr",
30
+ .version_id = 1,
31
+ .minimum_version_id = 1,
32
+ .needed = ctr_needed,
33
+ .fields = (const VMStateField[]) {
34
+ VMSTATE_UINT64(env.mctrctl, RISCVCPU),
35
+ VMSTATE_UINT32(env.sctrdepth, RISCVCPU),
36
+ VMSTATE_UINT32(env.sctrstatus, RISCVCPU),
37
+ VMSTATE_UINT64(env.vsctrctl, RISCVCPU),
38
+ VMSTATE_UINT64_ARRAY(env.ctr_src, RISCVCPU, 16 << SCTRDEPTH_MAX),
39
+ VMSTATE_UINT64_ARRAY(env.ctr_dst, RISCVCPU, 16 << SCTRDEPTH_MAX),
40
+ VMSTATE_UINT64_ARRAY(env.ctr_data, RISCVCPU, 16 << SCTRDEPTH_MAX),
41
+ VMSTATE_END_OF_LIST()
42
+ }
43
+};
44
+
45
static bool pmu_needed(void *opaque)
46
{
47
RISCVCPU *cpu = opaque;
48
@@ -XXX,XX +XXX,XX @@ const VMStateDescription vmstate_riscv_cpu = {
49
&vmstate_jvt,
50
&vmstate_elp,
51
&vmstate_ssp,
52
+ &vmstate_ctr,
53
NULL
54
}
55
};
56
--
57
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Rob Bradford <rbradford@rivosinc.com>
2
1
3
Some extra spaces made into into the RISC-V opcode data table.
4
5
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
6
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
7
Message-ID: <20250206153410.236636-2-rbradford@rivosinc.com>
8
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
9
---
10
disas/riscv.c | 12 ++++++------
11
1 file changed, 6 insertions(+), 6 deletions(-)
12
13
diff --git a/disas/riscv.c b/disas/riscv.c
14
index XXXXXXX..XXXXXXX 100644
15
--- a/disas/riscv.c
16
+++ b/disas/riscv.c
17
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data rvi_opcode_data[] = {
18
{ "aes32esi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
19
{ "aes32dsmi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
20
{ "aes32dsi", rv_codec_k_bs, rv_fmt_rs1_rs2_bs, NULL, 0, 0, 0 },
21
- { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
22
+ { "aes64ks1i", rv_codec_k_rnum, rv_fmt_rd_rs1_rnum, NULL, 0, 0, 0 },
23
{ "aes64ks2", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
24
{ "aes64im", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0 },
25
{ "aes64esm", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
26
@@ -XXX,XX +XXX,XX @@ const rv_opcode_data rvi_opcode_data[] = {
27
{ "mop.rr.5", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
28
{ "mop.rr.6", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
29
{ "mop.rr.7", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 },
30
- { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
31
- { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
32
- { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
33
- { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
34
- { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
35
+ { "c.mop.1", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
36
+ { "c.mop.3", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
37
+ { "c.mop.5", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
38
+ { "c.mop.7", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
39
+ { "c.mop.9", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
40
{ "c.mop.11", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
41
{ "c.mop.13", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
42
{ "c.mop.15", rv_codec_ci_none, rv_fmt_none, NULL, 0, 0, 0 },
43
--
44
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Rob Bradford <rbradford@rivosinc.com>
2
1
3
This reflects the latest frozen version of the RISC-V Debug
4
specification (1.0.0-rc4) which includes the Sdtrig extension.
5
6
Signed-off-by: Rob Bradford <rbradford@rivosinc.com>
7
Acked-by: Alistair Francis <alistair.francis@wdc.com>
8
Message-ID: <20250206153410.236636-3-rbradford@rivosinc.com>
9
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
10
---
11
disas/riscv.c | 4 +++-
12
1 file changed, 3 insertions(+), 1 deletion(-)
13
14
diff --git a/disas/riscv.c b/disas/riscv.c
15
index XXXXXXX..XXXXXXX 100644
16
--- a/disas/riscv.c
17
+++ b/disas/riscv.c
18
@@ -XXX,XX +XXX,XX @@ static const char *csr_name(int csrno)
19
case 0x07a1: return "tdata1";
20
case 0x07a2: return "tdata2";
21
case 0x07a3: return "tdata3";
22
+ case 0x07a4: return "tinfo";
23
case 0x07b0: return "dcsr";
24
case 0x07b1: return "dpc";
25
- case 0x07b2: return "dscratch";
26
+ case 0x07b2: return "dscratch0";
27
+ case 0x07b3: return "dscratch1";
28
case 0x0b00: return "mcycle";
29
case 0x0b01: return "mtime";
30
case 0x0b02: return "minstret";
31
--
32
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
As per the latest privilege specification v1.13[1], the sscofpmf
4
only reserves first 8 bits of hpmeventX. Update the corresponding
5
masks accordingly.
6
7
[1]https://github.com/riscv/riscv-isa-manual/issues/1578
8
9
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
10
Signed-off-by: Atish Patra <atishp@rivosinc.com>
11
Acked-by: Alistair Francis <alistair.francis@wdc.com>
12
Message-ID: <20250206-pmu_minor_fixes-v2-1-1bb0f4aeb8b4@rivosinc.com>
13
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
14
---
15
target/riscv/cpu_bits.h | 5 ++---
16
1 file changed, 2 insertions(+), 3 deletions(-)
17
18
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/target/riscv/cpu_bits.h
21
+++ b/target/riscv/cpu_bits.h
22
@@ -XXX,XX +XXX,XX @@ typedef enum CTRType {
23
MHPMEVENTH_BIT_VSINH | \
24
MHPMEVENTH_BIT_VUINH)
25
26
-#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
27
-#define MHPMEVENT_IDX_MASK 0xFFFFF
28
-#define MHPMEVENT_SSCOF_RESVD 16
29
+#define MHPMEVENT_SSCOF_MASK MAKE_64BIT_MASK(63, 56)
30
+#define MHPMEVENT_IDX_MASK (~MHPMEVENT_SSCOF_MASK)
31
32
/* RISC-V-specific interrupt pending bits. */
33
#define CPU_INTERRUPT_RNMI CPU_INTERRUPT_TGT_EXT_0
34
--
35
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Atish Patra <atishp@rivosinc.com>
2
1
3
As per the ISA definition, the upper 8 bits in hpmevent are defined
4
by Sscofpmf for privilege mode filtering and overflow bits while the
5
lower 56 bits are designated for platform-specific hpmevent values.
6
For the reset case, mhpmevent value should have zero in lower 56 bits.
7
Software may set the OF bit to disable the overflow interrupt.
8
9
Ensure that the correct value is checked after masking while clearing the
10
event encodings.
11
12
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
13
Acked-by: Alistair Francis <alistair.francis@wdc.com>
14
Signed-off-by: Atish Patra <atishp@rivosinc.com>
15
Message-ID: <20250206-pmu_minor_fixes-v2-2-1bb0f4aeb8b4@rivosinc.com>
16
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
17
---
18
target/riscv/pmu.c | 2 +-
19
1 file changed, 1 insertion(+), 1 deletion(-)
20
21
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
22
index XXXXXXX..XXXXXXX 100644
23
--- a/target/riscv/pmu.c
24
+++ b/target/riscv/pmu.c
25
@@ -XXX,XX +XXX,XX @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
26
* Expected mhpmevent value is zero for reset case. Remove the current
27
* mapping.
28
*/
29
- if (!value) {
30
+ if (!(value & MHPMEVENT_IDX_MASK)) {
31
g_hash_table_foreach_remove(cpu->pmu_event_ctr_map,
32
pmu_remove_event_map,
33
GUINT_TO_POINTER(ctr_idx));
34
--
35
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
2
1
3
Update headers to retrieve the latest KVM caps for RISC-V.
4
5
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
6
Message-ID: <20250221153758.652078-2-dbarboza@ventanamicro.com>
7
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
8
---
9
include/standard-headers/linux/ethtool.h | 4 +
10
include/standard-headers/linux/fuse.h | 76 ++++++++++++++++++-
11
.../linux/input-event-codes.h | 1 +
12
include/standard-headers/linux/pci_regs.h | 16 ++--
13
include/standard-headers/linux/virtio_pci.h | 14 ++++
14
linux-headers/asm-arm64/kvm.h | 3 -
15
linux-headers/asm-loongarch/kvm_para.h | 1 +
16
linux-headers/asm-riscv/kvm.h | 7 +-
17
linux-headers/asm-x86/kvm.h | 1 +
18
linux-headers/linux/iommufd.h | 35 ++++++---
19
linux-headers/linux/kvm.h | 8 +-
20
linux-headers/linux/stddef.h | 13 +++-
21
linux-headers/linux/vduse.h | 2 +-
22
13 files changed, 146 insertions(+), 35 deletions(-)
23
24
diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h
25
index XXXXXXX..XXXXXXX 100644
26
--- a/include/standard-headers/linux/ethtool.h
27
+++ b/include/standard-headers/linux/ethtool.h
28
@@ -XXX,XX +XXX,XX @@ enum ethtool_link_ext_substate_module {
29
* @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
30
* @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
31
* @ETH_SS_STATS_RMON: names of RMON statistics
32
+ * @ETH_SS_STATS_PHY: names of PHY(dev) statistics
33
+ * @ETH_SS_TS_FLAGS: hardware timestamping flags
34
*
35
* @ETH_SS_COUNT: number of defined string sets
36
*/
37
@@ -XXX,XX +XXX,XX @@ enum ethtool_stringset {
38
    ETH_SS_STATS_ETH_MAC,
39
    ETH_SS_STATS_ETH_CTRL,
40
    ETH_SS_STATS_RMON,
41
+    ETH_SS_STATS_PHY,
42
+    ETH_SS_TS_FLAGS,
43
44
    /* add new constants above here */
45
    ETH_SS_COUNT
46
diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h
47
index XXXXXXX..XXXXXXX 100644
48
--- a/include/standard-headers/linux/fuse.h
49
+++ b/include/standard-headers/linux/fuse.h
50
@@ -XXX,XX +XXX,XX @@
51
*
52
* 7.41
53
* - add FUSE_ALLOW_IDMAP
54
+ * 7.42
55
+ * - Add FUSE_OVER_IO_URING and all other io-uring related flags and data
56
+ * structures:
57
+ * - struct fuse_uring_ent_in_out
58
+ * - struct fuse_uring_req_header
59
+ * - struct fuse_uring_cmd_req
60
+ * - FUSE_URING_IN_OUT_HEADER_SZ
61
+ * - FUSE_URING_OP_IN_OUT_SZ
62
+ * - enum fuse_uring_cmd
63
*/
64
65
#ifndef _LINUX_FUSE_H
66
@@ -XXX,XX +XXX,XX @@
67
#define FUSE_KERNEL_VERSION 7
68
69
/** Minor version number of this interface */
70
-#define FUSE_KERNEL_MINOR_VERSION 41
71
+#define FUSE_KERNEL_MINOR_VERSION 42
72
73
/** The node ID of the root inode */
74
#define FUSE_ROOT_ID 1
75
@@ -XXX,XX +XXX,XX @@ struct fuse_file_lock {
76
* FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit
77
*         of the request ID indicates resend requests
78
* FUSE_ALLOW_IDMAP: allow creation of idmapped mounts
79
+ * FUSE_OVER_IO_URING: Indicate that client supports io-uring
80
*/
81
#define FUSE_ASYNC_READ        (1 << 0)
82
#define FUSE_POSIX_LOCKS    (1 << 1)
83
@@ -XXX,XX +XXX,XX @@ struct fuse_file_lock {
84
/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */
85
#define FUSE_DIRECT_IO_RELAX    FUSE_DIRECT_IO_ALLOW_MMAP
86
#define FUSE_ALLOW_IDMAP    (1ULL << 40)
87
+#define FUSE_OVER_IO_URING    (1ULL << 41)
88
89
/**
90
* CUSE INIT request/reply flags
91
@@ -XXX,XX +XXX,XX @@ struct fuse_supp_groups {
92
    uint32_t    groups[];
93
};
94
95
+/**
96
+ * Size of the ring buffer header
97
+ */
98
+#define FUSE_URING_IN_OUT_HEADER_SZ 128
99
+#define FUSE_URING_OP_IN_OUT_SZ 128
100
+
101
+/* Used as part of the fuse_uring_req_header */
102
+struct fuse_uring_ent_in_out {
103
+    uint64_t flags;
104
+
105
+    /*
106
+     * commit ID to be used in a reply to a ring request (see also
107
+     * struct fuse_uring_cmd_req)
108
+     */
109
+    uint64_t commit_id;
110
+
111
+    /* size of user payload buffer */
112
+    uint32_t payload_sz;
113
+    uint32_t padding;
114
+
115
+    uint64_t reserved;
116
+};
117
+
118
+/**
119
+ * Header for all fuse-io-uring requests
120
+ */
121
+struct fuse_uring_req_header {
122
+    /* struct fuse_in_header / struct fuse_out_header */
123
+    char in_out[FUSE_URING_IN_OUT_HEADER_SZ];
124
+
125
+    /* per op code header */
126
+    char op_in[FUSE_URING_OP_IN_OUT_SZ];
127
+
128
+    struct fuse_uring_ent_in_out ring_ent_in_out;
129
+};
130
+
131
+/**
132
+ * sqe commands to the kernel
133
+ */
134
+enum fuse_uring_cmd {
135
+    FUSE_IO_URING_CMD_INVALID = 0,
136
+
137
+    /* register the request buffer and fetch a fuse request */
138
+    FUSE_IO_URING_CMD_REGISTER = 1,
139
+
140
+    /* commit fuse request result and fetch next request */
141
+    FUSE_IO_URING_CMD_COMMIT_AND_FETCH = 2,
142
+};
143
+
144
+/**
145
+ * In the 80B command area of the SQE.
146
+ */
147
+struct fuse_uring_cmd_req {
148
+    uint64_t flags;
149
+
150
+    /* entry identifier for commits */
151
+    uint64_t commit_id;
152
+
153
+    /* queue the command is for (queue index) */
154
+    uint16_t qid;
155
+    uint8_t padding[6];
156
+};
157
+
158
#endif /* _LINUX_FUSE_H */
159
diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h
160
index XXXXXXX..XXXXXXX 100644
161
--- a/include/standard-headers/linux/input-event-codes.h
162
+++ b/include/standard-headers/linux/input-event-codes.h
163
@@ -XXX,XX +XXX,XX @@
164
#define KEY_NOTIFICATION_CENTER    0x1bc    /* Show/hide the notification center */
165
#define KEY_PICKUP_PHONE    0x1bd    /* Answer incoming call */
166
#define KEY_HANGUP_PHONE    0x1be    /* Decline incoming call */
167
+#define KEY_LINK_PHONE        0x1bf /* AL Phone Syncing */
168
169
#define KEY_DEL_EOL        0x1c0
170
#define KEY_DEL_EOS        0x1c1
171
diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h
172
index XXXXXXX..XXXXXXX 100644
173
--- a/include/standard-headers/linux/pci_regs.h
174
+++ b/include/standard-headers/linux/pci_regs.h
175
@@ -XXX,XX +XXX,XX @@
176
#define PCI_EXP_DEVSTA_TRPND    0x0020    /* Transactions Pending */
177
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1    12    /* v1 endpoints without link end here */
178
#define PCI_EXP_LNKCAP        0x0c    /* Link Capabilities */
179
-#define PCI_EXP_LNKCAP_SLS    0x0000000f /* Supported Link Speeds */
180
+#define PCI_EXP_LNKCAP_SLS    0x0000000f /* Max Link Speed (prior to PCIe r3.0: Supported Link Speeds) */
181
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
182
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
183
#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
184
@@ -XXX,XX +XXX,XX @@
185
#define PCI_EXP_DEVCAP2_OBFF_MSG    0x00040000 /* New message signaling */
186
#define PCI_EXP_DEVCAP2_OBFF_WAKE    0x00080000 /* Re-use WAKE# for OBFF */
187
#define PCI_EXP_DEVCAP2_EE_PREFIX    0x00200000 /* End-End TLP Prefix */
188
+#define PCI_EXP_DEVCAP2_EE_PREFIX_MAX    0x00c00000 /* Max End-End TLP Prefixes */
189
#define PCI_EXP_DEVCTL2        0x28    /* Device Control 2 */
190
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT    0x000f    /* Completion Timeout Value */
191
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS    0x0010    /* Completion Timeout Disable */
192
@@ -XXX,XX +XXX,XX @@
193
    /* Same bits as above */
194
#define PCI_ERR_CAP        0x18    /* Advanced Error Capabilities & Ctrl*/
195
#define PCI_ERR_CAP_FEP(x)    ((x) & 0x1f)    /* First Error Pointer */
196
-#define PCI_ERR_CAP_ECRC_GENC    0x00000020    /* ECRC Generation Capable */
197
-#define PCI_ERR_CAP_ECRC_GENE    0x00000040    /* ECRC Generation Enable */
198
-#define PCI_ERR_CAP_ECRC_CHKC    0x00000080    /* ECRC Check Capable */
199
-#define PCI_ERR_CAP_ECRC_CHKE    0x00000100    /* ECRC Check Enable */
200
+#define PCI_ERR_CAP_ECRC_GENC        0x00000020 /* ECRC Generation Capable */
201
+#define PCI_ERR_CAP_ECRC_GENE        0x00000040 /* ECRC Generation Enable */
202
+#define PCI_ERR_CAP_ECRC_CHKC        0x00000080 /* ECRC Check Capable */
203
+#define PCI_ERR_CAP_ECRC_CHKE        0x00000100 /* ECRC Check Enable */
204
+#define PCI_ERR_CAP_PREFIX_LOG_PRESENT    0x00000800 /* TLP Prefix Log Present */
205
#define PCI_ERR_HEADER_LOG    0x1c    /* Header Log Register (16 bytes) */
206
#define PCI_ERR_ROOT_COMMAND    0x2c    /* Root Error Command */
207
#define PCI_ERR_ROOT_CMD_COR_EN    0x00000001 /* Correctable Err Reporting Enable */
208
@@ -XXX,XX +XXX,XX @@
209
#define PCI_ERR_ROOT_FATAL_RCV        0x00000040 /* Fatal Received */
210
#define PCI_ERR_ROOT_AER_IRQ        0xf8000000 /* Advanced Error Interrupt Message Number */
211
#define PCI_ERR_ROOT_ERR_SRC    0x34    /* Error Source Identification */
212
+#define PCI_ERR_PREFIX_LOG    0x38    /* TLP Prefix LOG Register (up to 16 bytes) */
213
214
/* Virtual Channel */
215
#define PCI_VC_PORT_CAP1    0x04
216
@@ -XXX,XX +XXX,XX @@
217
#define PCI_ACS_CTRL        0x06    /* ACS Control Register */
218
#define PCI_ACS_EGRESS_CTL_V    0x08    /* ACS Egress Control Vector */
219
220
-#define PCI_VSEC_HDR        4    /* extended cap - vendor-specific */
221
-#define PCI_VSEC_HDR_LEN_SHIFT    20    /* shift for length field */
222
-
223
/* SATA capability */
224
#define PCI_SATA_REGS        4    /* SATA REGs specifier */
225
#define PCI_SATA_REGS_MASK    0xF    /* location - BAR#/inline */
226
diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h
227
index XXXXXXX..XXXXXXX 100644
228
--- a/include/standard-headers/linux/virtio_pci.h
229
+++ b/include/standard-headers/linux/virtio_pci.h
230
@@ -XXX,XX +XXX,XX @@
231
#define VIRTIO_PCI_CAP_PCI_CFG        5
232
/* Additional shared memory capability */
233
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
234
+/* PCI vendor data configuration */
235
+#define VIRTIO_PCI_CAP_VENDOR_CFG    9
236
237
/* This is the PCI capability header: */
238
struct virtio_pci_cap {
239
@@ -XXX,XX +XXX,XX @@ struct virtio_pci_cap {
240
    uint32_t length;        /* Length of the structure, in bytes. */
241
};
242
243
+/* This is the PCI vendor data capability header: */
244
+struct virtio_pci_vndr_data {
245
+    uint8_t cap_vndr;        /* Generic PCI field: PCI_CAP_ID_VNDR */
246
+    uint8_t cap_next;        /* Generic PCI field: next ptr. */
247
+    uint8_t cap_len;        /* Generic PCI field: capability length */
248
+    uint8_t cfg_type;        /* Identifies the structure. */
249
+    uint16_t vendor_id;    /* Identifies the vendor-specific format. */
250
+    /* For Vendor Definition */
251
+    /* Pads structure to a multiple of 4 bytes */
252
+    /* Reads must not have side effects */
253
+};
254
+
255
struct virtio_pci_cap64 {
256
    struct virtio_pci_cap cap;
257
    uint32_t offset_hi; /* Most sig 32 bits of offset */
258
diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h
259
index XXXXXXX..XXXXXXX 100644
260
--- a/linux-headers/asm-arm64/kvm.h
261
+++ b/linux-headers/asm-arm64/kvm.h
262
@@ -XXX,XX +XXX,XX @@
263
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
264
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
265
266
-#define KVM_REG_SIZE(id)                        \
267
-    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
268
-
269
struct kvm_regs {
270
    struct user_pt_regs regs;    /* sp = sp_el0 */
271
272
diff --git a/linux-headers/asm-loongarch/kvm_para.h b/linux-headers/asm-loongarch/kvm_para.h
273
index XXXXXXX..XXXXXXX 100644
274
--- a/linux-headers/asm-loongarch/kvm_para.h
275
+++ b/linux-headers/asm-loongarch/kvm_para.h
276
@@ -XXX,XX +XXX,XX @@
277
#define KVM_FEATURE_STEAL_TIME        2
278
/* BIT 24 - 31 are features configurable by user space vmm */
279
#define KVM_FEATURE_VIRT_EXTIOI    24
280
+#define KVM_FEATURE_USER_HCALL        25
281
282
#endif /* _ASM_KVM_PARA_H */
283
diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h
284
index XXXXXXX..XXXXXXX 100644
285
--- a/linux-headers/asm-riscv/kvm.h
286
+++ b/linux-headers/asm-riscv/kvm.h
287
@@ -XXX,XX +XXX,XX @@ enum KVM_RISCV_ISA_EXT_ID {
288
    KVM_RISCV_ISA_EXT_SSNPM,
289
    KVM_RISCV_ISA_EXT_SVADE,
290
    KVM_RISCV_ISA_EXT_SVADU,
291
+    KVM_RISCV_ISA_EXT_SVVPTC,
292
+    KVM_RISCV_ISA_EXT_ZABHA,
293
+    KVM_RISCV_ISA_EXT_ZICCRSE,
294
    KVM_RISCV_ISA_EXT_MAX,
295
};
296
297
@@ -XXX,XX +XXX,XX @@ enum KVM_RISCV_SBI_EXT_ID {
298
    KVM_RISCV_SBI_EXT_VENDOR,
299
    KVM_RISCV_SBI_EXT_DBCN,
300
    KVM_RISCV_SBI_EXT_STA,
301
+    KVM_RISCV_SBI_EXT_SUSP,
302
    KVM_RISCV_SBI_EXT_MAX,
303
};
304
305
@@ -XXX,XX +XXX,XX @@ struct kvm_riscv_sbi_sta {
306
#define KVM_RISCV_TIMER_STATE_OFF    0
307
#define KVM_RISCV_TIMER_STATE_ON    1
308
309
-#define KVM_REG_SIZE(id)        \
310
-    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
311
-
312
/* If you need to interpret the index values, here is the key: */
313
#define KVM_REG_RISCV_TYPE_MASK        0x00000000FF000000
314
#define KVM_REG_RISCV_TYPE_SHIFT    24
315
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
316
index XXXXXXX..XXXXXXX 100644
317
--- a/linux-headers/asm-x86/kvm.h
318
+++ b/linux-headers/asm-x86/kvm.h
319
@@ -XXX,XX +XXX,XX @@ struct kvm_hyperv_eventfd {
320
#define KVM_X86_SEV_VM        2
321
#define KVM_X86_SEV_ES_VM    3
322
#define KVM_X86_SNP_VM        4
323
+#define KVM_X86_TDX_VM        5
324
325
#endif /* _ASM_X86_KVM_H */
326
diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h
327
index XXXXXXX..XXXXXXX 100644
328
--- a/linux-headers/linux/iommufd.h
329
+++ b/linux-headers/linux/iommufd.h
330
@@ -XXX,XX +XXX,XX @@ struct iommu_ioas_unmap {
331
* ioctl(IOMMU_OPTION_HUGE_PAGES)
332
* @IOMMU_OPTION_RLIMIT_MODE:
333
* Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege
334
- * to invoke this. Value 0 (default) is user based accouting, 1 uses process
335
+ * to invoke this. Value 0 (default) is user based accounting, 1 uses process
336
* based accounting. Global option, object_id must be 0
337
* @IOMMU_OPTION_HUGE_PAGES:
338
* Value 1 (default) allows contiguous pages to be combined when generating
339
@@ -XXX,XX +XXX,XX @@ struct iommu_vfio_ioas {
340
* @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The
341
* domain can be attached to any PASID on the device.
342
* Any domain attached to the non-PASID part of the
343
- * device must also be flaged, otherwise attaching a
344
+ * device must also be flagged, otherwise attaching a
345
* PASID will blocked.
346
* If IOMMU does not support PASID it will return
347
* error (-EOPNOTSUPP).
348
@@ -XXX,XX +XXX,XX @@ struct iommu_hw_info_vtd {
349
* For the details of @idr, @iidr and @aidr, please refer to the chapters
350
* from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
351
*
352
- * User space should read the underlying ARM SMMUv3 hardware information for
353
- * the list of supported features.
354
+ * This reports the raw HW capability, and not all bits are meaningful to be
355
+ * read by userspace. Only the following fields should be used:
356
*
357
- * Note that these values reflect the raw HW capability, without any insight if
358
- * any required kernel driver support is present. Bits may be set indicating the
359
- * HW has functionality that is lacking kernel software support, such as BTM. If
360
- * a VMM is using this information to construct emulated copies of these
361
- * registers it should only forward bits that it knows it can support.
362
+ * idr[0]: ST_LEVEL, TERM_MODEL, STALL_MODEL, TTENDIAN , CD2L, ASID16, TTF
363
+ * idr[1]: SIDSIZE, SSIDSIZE
364
+ * idr[3]: BBML, RIL
365
+ * idr[5]: VAX, GRAN64K, GRAN16K, GRAN4K
366
*
367
- * In future, presence of required kernel support will be indicated in flags.
368
+ * - S1P should be assumed to be true if a NESTED HWPT can be created
369
+ * - VFIO/iommufd only support platforms with COHACC, it should be assumed to be
370
+ * true.
371
+ * - ATS is a per-device property. If the VMM describes any devices as ATS
372
+ * capable in ACPI/DT it should set the corresponding idr.
373
+ *
374
+ * This list may expand in future (eg E0PD, AIE, PBHA, D128, DS etc). It is
375
+ * important that VMMs do not read bits outside the list to allow for
376
+ * compatibility with future kernels. Several features in the SMMUv3
377
+ * architecture are not currently supported by the kernel for nesting: HTTU,
378
+ * BTM, MPAM and others.
379
*/
380
struct iommu_hw_info_arm_smmuv3 {
381
    __u32 flags;
382
@@ -XXX,XX +XXX,XX @@ struct iommu_hwpt_vtd_s1_invalidate {
383
};
384
385
/**
386
- * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cahce invalidation
387
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
388
* (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
389
* @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
390
* Must be little-endian.
391
@@ -XXX,XX +XXX,XX @@ enum iommu_hwpt_pgfault_perm {
392
* @pasid: Process Address Space ID
393
* @grpid: Page Request Group Index
394
* @perm: Combination of enum iommu_hwpt_pgfault_perm
395
+ * @__reserved: Must be 0.
396
* @addr: Fault address
397
* @length: a hint of how much data the requestor is expecting to fetch. For
398
* example, if the PRI initiator knows it is going to do a 10MB
399
@@ -XXX,XX +XXX,XX @@ struct iommu_hwpt_pgfault {
400
    __u32 pasid;
401
    __u32 grpid;
402
    __u32 perm;
403
-    __u64 addr;
404
+    __u32 __reserved;
405
+    __aligned_u64 addr;
406
    __u32 length;
407
    __u32 cookie;
408
};
409
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
410
index XXXXXXX..XXXXXXX 100644
411
--- a/linux-headers/linux/kvm.h
412
+++ b/linux-headers/linux/kvm.h
413
@@ -XXX,XX +XXX,XX @@ struct kvm_ioeventfd {
414
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
415
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
416
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
417
-#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
418
- KVM_X86_DISABLE_EXITS_HLT | \
419
- KVM_X86_DISABLE_EXITS_PAUSE | \
420
- KVM_X86_DISABLE_EXITS_CSTATE)
421
422
/* for KVM_ENABLE_CAP */
423
struct kvm_enable_cap {
424
@@ -XXX,XX +XXX,XX @@ struct kvm_dirty_tlb {
425
426
#define KVM_REG_SIZE_SHIFT    52
427
#define KVM_REG_SIZE_MASK    0x00f0000000000000ULL
428
+
429
+#define KVM_REG_SIZE(id)        \
430
+    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
431
+
432
#define KVM_REG_SIZE_U8        0x0000000000000000ULL
433
#define KVM_REG_SIZE_U16    0x0010000000000000ULL
434
#define KVM_REG_SIZE_U32    0x0020000000000000ULL
435
diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h
436
index XXXXXXX..XXXXXXX 100644
437
--- a/linux-headers/linux/stddef.h
438
+++ b/linux-headers/linux/stddef.h
439
@@ -XXX,XX +XXX,XX @@
440
#define __always_inline __inline__
441
#endif
442
443
+/* Not all C++ standards support type declarations inside an anonymous union */
444
+#ifndef __cplusplus
445
+#define __struct_group_tag(TAG)        TAG
446
+#else
447
+#define __struct_group_tag(TAG)
448
+#endif
449
+
450
/**
451
* __struct_group() - Create a mirrored named and anonyomous struct
452
*
453
@@ -XXX,XX +XXX,XX @@
454
* and size: one anonymous and one named. The former's members can be used
455
* normally without sub-struct naming, and the latter can be used to
456
* reason about the start, end, and size of the group of struct members.
457
- * The named struct can also be explicitly tagged for layer reuse, as well
458
- * as both having struct attributes appended.
459
+ * The named struct can also be explicitly tagged for layer reuse (C only),
460
+ * as well as both having struct attributes appended.
461
*/
462
#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
463
    union { \
464
        struct { MEMBERS } ATTRS; \
465
-        struct TAG { MEMBERS } ATTRS NAME; \
466
+        struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \
467
    } ATTRS
468
469
#ifdef __cplusplus
470
diff --git a/linux-headers/linux/vduse.h b/linux-headers/linux/vduse.h
471
index XXXXXXX..XXXXXXX 100644
472
--- a/linux-headers/linux/vduse.h
473
+++ b/linux-headers/linux/vduse.h
474
@@ -XXX,XX +XXX,XX @@
475
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
476
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
477
#ifndef _VDUSE_H_
478
#define _VDUSE_H_
479
480
--
481
2.48.1
diff view generated by jsdifflib
Deleted patch
1
From: Tomasz Jeznach <tjeznach@rivosinc.com>
2
1
3
RISCV_IOMMU_REG_IOHPMCYCLES writes are done by
4
riscv_iommu_process_hpmcycle_write(), called by the mmio write callback
5
via riscv_iommu_process_hpm_writes().
6
7
Signed-off-by: Tomasz Jeznach <tjeznach@rivosinc.com>
8
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
9
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
10
Message-ID: <20250224190826.1858473-8-dbarboza@ventanamicro.com>
11
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
12
---
13
hw/riscv/riscv-iommu-hpm.h | 1 +
14
hw/riscv/riscv-iommu-hpm.c | 19 +++++++++++++++++++
15
hw/riscv/riscv-iommu.c | 2 +-
16
3 files changed, 21 insertions(+), 1 deletion(-)
17
18
diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
19
index XXXXXXX..XXXXXXX 100644
20
--- a/hw/riscv/riscv-iommu-hpm.h
21
+++ b/hw/riscv/riscv-iommu-hpm.h
22
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
23
unsigned event_id);
24
void riscv_iommu_hpm_timer_cb(void *priv);
25
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
26
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s);
27
28
#endif
29
diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
30
index XXXXXXX..XXXXXXX 100644
31
--- a/hw/riscv/riscv-iommu-hpm.c
32
+++ b/hw/riscv/riscv-iommu-hpm.c
33
@@ -XXX,XX +XXX,XX @@ void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
34
timer_del(s->hpm_timer);
35
}
36
}
37
+
38
+void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
39
+{
40
+ const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
41
+ const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
42
+
43
+ /*
44
+ * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
45
+ */
46
+ if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
47
+ !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
48
+ riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
49
+ RISCV_IOMMU_IOCOUNTOVF_CY);
50
+ }
51
+
52
+ s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
53
+ s->hpmcycle_prev = get_cycles();
54
+ hpm_setup_timer(s, s->hpmcycle_val);
55
+}
56
diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
57
index XXXXXXX..XXXXXXX 100644
58
--- a/hw/riscv/riscv-iommu.c
59
+++ b/hw/riscv/riscv-iommu.c
60
@@ -XXX,XX +XXX,XX @@ static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
61
62
case RISCV_IOMMU_REG_IOHPMCYCLES:
63
case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
64
- /* not yet implemented */
65
+ riscv_iommu_process_hpmcycle_write(s);
66
break;
67
68
case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
69
--
70
2.48.1
diff view generated by jsdifflib